Example #1
    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(),
                                     True)
        schema = self.mapper.getOutputSchema()
        self.mKey = schema.addField("m",
                                    doc="template m",
                                    type="ArrayF",
                                    size=self.bfd.BFDConfig.MXYSIZE)
        self.dmKey = schema.addField("dm",
                                     doc="template dm",
                                     type="ArrayF",
                                     size=self.bfd.BFDConfig.MSIZE *
                                     self.bfd.BFDConfig.DSIZE)
        self.dxyKey = schema.addField("dxy",
                                      doc="template dxy",
                                      type="ArrayF",
                                      size=self.bfd.BFDConfig.XYSIZE *
                                      self.bfd.BFDConfig.DSIZE)
        # Builtin float: np.float was removed in NumPy 1.24
        self.ndaKey = schema.addField("nda", doc="nda", type=float)
        self.idKey = schema.addField("bfd_id", doc="id", type=np.int64)
        if self.config.zFile:
            self.zKey = schema.addField("z", doc="redshift", type=float)
            # self.zIdKey = schema.addField("z_id", doc="redshift", type=np.int64)

        return schema
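
Every defineSchema variant on this page follows the same pattern: wrap the reference schema in a SchemaMapper, map the minimal source schema through, and add new fields to the output schema before any catalog is built. A minimal self-contained sketch, assuming the LSST Science Pipelines (lsst.afw.table) are installed; the field name "demo_flux" is illustrative, not part of any real pipeline schema:

from lsst.afw.table import SchemaMapper, SourceCatalog

# Map the minimal SourceCatalog schema through unchanged (True = doMap)
mapper = SchemaMapper(SourceCatalog.Table.makeMinimalSchema())
mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
schema = mapper.getOutputSchema()

# Fields must be added before a catalog is constructed from the schema
fluxKey = schema.addField("demo_flux", type=float, doc="illustrative field")

cat = SourceCatalog(schema)
rec = cat.addNew()
rec.set(fluxKey, 1.0)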
Example #2
def combineWithForce(meas, force):
    """Combine the meas and forced_src catalogs."""
    if len(meas) != len(force):
        raise Exception("# Meas and Forced_src catalogs should have " +
                        "the same size!")
    mapper = SchemaMapper(meas.schema)
    mapper.addMinimalSchema(meas.schema)
    newSchema = mapper.getOutputSchema()
    # Add new fields
    newSchema.addField('force.deblend.nchild', type=int)
    newSchema.addField('force.classification.extendedness', type=float)
    newSchema.addField('force.flux.kron', type=float)
    newSchema.addField('force.flux.kron.err', type=float)
    newSchema.addField('force.flux.psf', type=float)
    newSchema.addField('force.flux.psf.err', type=float)
    newSchema.addField('force.flux.kron.apcorr', type=float)
    newSchema.addField('force.flux.kron.apcorr.err', type=float)
    newSchema.addField('force.flux.psf.apcorr', type=float)
    newSchema.addField('force.flux.psf.apcorr.err', type=float)
    newSchema.addField('force.cmodel.flux', type=float)
    newSchema.addField('force.cmodel.flux.err', type=float)
    newSchema.addField('force.cmodel.fracDev', type=float)
    newSchema.addField('force.cmodel.exp.flux', type=float)
    newSchema.addField('force.cmodel.exp.flux.err', type=float)
    newSchema.addField('force.cmodel.dev.flux', type=float)
    newSchema.addField('force.cmodel.dev.flux.err', type=float)
    newSchema.addField('force.cmodel.flux.apcorr', type=float)
    newSchema.addField('force.cmodel.flux.apcorr.err', type=float)
    newSchema.addField('force.cmodel.exp.flux.apcorr', type=float)
    newSchema.addField('force.cmodel.exp.flux.apcorr.err', type=float)
    newSchema.addField('force.cmodel.dev.flux.apcorr', type=float)
    newSchema.addField('force.cmodel.dev.flux.apcorr.err', type=float)

    newCols = ['deblend.nchild', 'classification.extendedness',
               'flux.kron', 'flux.kron.err',
               'flux.psf', 'flux.psf.err',
               'flux.kron.apcorr', 'flux.kron.apcorr.err',
               'flux.psf.apcorr', 'flux.psf.apcorr.err',
               'cmodel.flux', 'cmodel.flux.err',
               'cmodel.flux.apcorr', 'cmodel.flux.apcorr.err',
               'cmodel.exp.flux', 'cmodel.exp.flux.err',
               'cmodel.exp.flux.apcorr', 'cmodel.exp.flux.apcorr.err',
               'cmodel.dev.flux', 'cmodel.dev.flux.err',
               'cmodel.dev.flux.apcorr', 'cmodel.dev.flux.apcorr.err',
               'cmodel.fracDev']
    combSrc = SourceCatalog(newSchema)
    combSrc.extend(meas, mapper=mapper)

    for key in newCols:
        combSrc['force.' + key][:] = force[key][:]

    for name in ("Centroid", "Shape"):
        val = getattr(meas.table, "get" + name + "Key")()
        err = getattr(meas.table, "get" + name + "ErrKey")()
        flag = getattr(meas.table, "get" + name + "FlagKey")()
        getattr(combSrc.table, "define" + name)(val, err, flag)

    return combSrc
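
A hypothetical call site for combineWithForce, assuming a Gen2 butler repository holding matching deepCoadd_meas and deepCoadd_forced_src catalogs; the repo path and data ID are placeholders:

import lsst.daf.persistence as dafPersist

butler = dafPersist.Butler('/path/to/repo')               # placeholder path
dataId = {'tract': 0, 'patch': '1,1', 'filter': 'HSC-I'}  # placeholder ID
meas = butler.get('deepCoadd_meas', dataId)
force = butler.get('deepCoadd_forced_src', dataId)
combined = combineWithForce(meas, force)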
Example #3
def create_source_catalog_from_text_and_butler(repo_dir, info, dataset='src'):
    butler = dafPersistence.Butler(repo_dir)
    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    newSchema = mapper.getOutputSchema()

    src_cat = SourceCatalog(newSchema)
    for row in info:
        record = src_cat.addNew()
        record.set('coord_ra', Angle(row['RA']*degrees))
        record.set('coord_dec', Angle(row['Dec']*degrees))

    print(src_cat['coord_ra'], src_cat['coord_dec'])
    return src_cat
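
The function assumes Angle and degrees are imported from lsst.geom (or the older lsst.afw.geom), and info only needs to be an iterable of rows with 'RA' and 'Dec' keys in degrees. A hedged usage sketch with an astropy.table.Table; the file and repo paths are placeholders:

from astropy.table import Table

info = Table.read('positions.txt', format='ascii')   # placeholder file
src_cat = create_source_catalog_from_text_and_butler('/path/to/repo', info)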
Example #4
    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
        schema = self.mapper.getOutputSchema()

        self.even = schema.addField('bfd_even', type="ArrayF",
                                    size=self.n_even, doc="Even Bfd moments")
        self.odd = schema.addField('bfd_odd', type="ArrayF",
                                   size=self.n_odd, doc="odd moments")
        self.shift = schema.addField('bfd_shift', type="ArrayF",
                                     size=2, doc="amount shifted to null moments")
        self.cov_even = schema.addField('bfd_cov_even', type="ArrayF",
                                        size=self.n_even*(self.n_even+1)//2,
                                        doc="even moment covariance matrix")
        self.cov_odd = schema.addField('bfd_cov_odd', type="ArrayF",
                                       size=self.n_odd*(self.n_odd+1)//2,
                                       doc="odd moment covariance matrix")
        self.flag = schema.addField('bfd_flag', type="Flag", doc="Set to 1 for any fatal failure")
        self.centroid_flag = schema.addField('bfd_flag_centroid', type="Flag",
                                             doc="Set to 1 for any fatal failure of centroid")
        self.parent_flag = schema.addField('bfd_flag_parent', type="Flag",
                                           doc="Set to 1 for parents")
        if self.config.add_single_bands:
            self.filter_keys = defaultdict(dict)
            self.n_even_single = self.n_even - len(self.config.filters) + 1
            self.n_odd_single = self.n_odd
            for band in self.config.filters:
                self.filter_keys[band]['even'] = schema.addField(f'bfd_even_{band}', type="ArrayF",
                                                                 size=self.n_even_single,
                                                                 doc=f"Even Bfd moments for filter {band}")
                self.filter_keys[band]['odd'] = schema.addField(f'bfd_odd_{band}', type="ArrayF",
                                                                size=self.n_odd_single,
                                                                doc=f"Odd Bfd moments for filter {band}")
                self.filter_keys[band]['cov_even'] = schema.addField(f'bfd_cov_even_{band}', type="ArrayF",
                                                                     size=self.n_even_single*(self.n_even_single+1)//2,
                                                                     doc=f"even moment covariance matrix in filter {band}")
                self.filter_keys[band]['cov_odd'] = schema.addField(f'bfd_cov_odd_{band}', type="ArrayF",
                                                                    size=self.n_odd_single*(self.n_odd_single+1)//2,
                                                                    doc=f"odd moment covariance matrix in filter {band}")

        return schema
Example #5
    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(),
                                     True)
        schema = self.mapper.getOutputSchema()

        self.even = schema.addField('bfd_even',
                                    type="ArrayF",
                                    size=self.n_even,
                                    doc="Even Bfd moments")
        self.odd = schema.addField('bfd_odd',
                                   type="ArrayF",
                                   size=self.n_odd,
                                   doc="Odd Bfd moments")
        self.shift = schema.addField('bfd_shift',
                                     type="ArrayF",
                                     size=2,
                                     doc="amount shifted to null moments")
        self.cov_even = schema.addField('bfd_cov_even',
                                        type="ArrayF",
                                        size=self.n_even * (self.n_even + 1) // 2,
                                        doc="even moment covariance matrix")
        self.cov_odd = schema.addField('bfd_cov_odd',
                                       type="ArrayF",
                                       size=self.n_odd * (self.n_odd + 1) // 2,
                                       doc="odd moment covariance matrix")
        self.flag = schema.addField('bfd_flag',
                                    type="Flag",
                                    doc="Set to 1 for any fatal failure")
        self.centroid_flag = schema.addField(
            'bfd_flag_centroid',
            type="Flag",
            doc="Set to 1 for any fatal failure of centroid")
        self.parent_flag = schema.addField('bfd_flag_parent',
                                           type="Flag",
                                           doc="Set to 1 for parents")
        return schema
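
The type="Flag" fields above are stored as packed bits in the record rather than as full columns. A minimal self-contained sketch of how they behave, assuming lsst.afw.table is available; the field name "demo_flag" is illustrative:

from lsst.afw.table import SourceCatalog

schema = SourceCatalog.Table.makeMinimalSchema()
flagKey = schema.addField('demo_flag', type="Flag", doc="illustrative flag")

cat = SourceCatalog(schema)
rec = cat.addNew()
rec.set(flagKey, True)   # flags pack into bit fields but set/get normally
assert rec.get(flagKey)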
Example #6
    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
        schema = self.mapper.getOutputSchema()
        self.pqrKey = schema.addField("pqr", doc="pqr", type="ArrayF",
                                      size=self.bfd.BFDConfig.DSIZE)
        self.momKey = schema.addField("moment", doc="moment", type="ArrayF",
                                      size=self.n_even)
        self.momCovKey = schema.addField("moment_cov", doc="moment covariance",
                                         type="ArrayF",
                                         size=self.n_even*(self.n_even+1)//2)
        self.numKey = schema.addField("n_templates", doc="number of templates",
                                      type=np.int64)
        self.uniqKey = schema.addField("n_unique", doc="number of unique templates",
                                       type=np.int32)
        self.zKey = schema.addField("z", doc="redshift", type=float)
        self.g1Key = schema.addField("g1", doc="shear g1", type=float)
        self.g2Key = schema.addField("g2", doc="shear g2", type=float)
        self.kappaKey = schema.addField("kappa", doc="convergence", type=float)
        self.magKey = schema.addField("mag", doc="magnitude", type=float)
        self.labelKey = schema.addField("label", doc="label", type=str, size=10)
        # self.zIdKey = schema.addField("z_id", doc="redshift", type=np.int64)

        return schema
Example #7
def combineWithForce(meas, force):
    """Combine the meas and forced_src catalogs."""
    if len(meas) != len(force):
        raise Exception("# Meas and Forced_src catalogs should have "
                        "the same size!")
    mapper = SchemaMapper(meas.schema)
    mapper.addMinimalSchema(meas.schema)
    newSchema = mapper.getOutputSchema()
    # Add new fields
    newSchema.addField('force_deblend_nChild', type=np.int32)
    newSchema.addField('force_base_ClassificationExtendedness_value',
                       type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_instFlux',
                       type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_instFluxErr',
                       type=float)
    newSchema.addField('force_base_PsfFlux_instFlux', type=float)
    newSchema.addField('force_base_PsfFlux_instFluxErr', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_apCorr', type=float)
    newSchema.addField('force_ext_photometryKron_KronFlux_apCorrErr',
                       type=float)
    newSchema.addField('force_base_PsfFlux_apCorr', type=float)
    newSchema.addField('force_base_PsfFlux_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_fracDev', type=float)
    newSchema.addField('force_modelfit_CModel_exp_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_exp_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_instFlux', type=float)
    newSchema.addField('force_modelfit_CModel_dev_instFluxErr', type=float)
    newSchema.addField('force_modelfit_CModel_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_exp_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_exp_apCorrErr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_apCorr', type=float)
    newSchema.addField('force_modelfit_CModel_dev_apCorrErr', type=float)

    # One entry for every force_* field added above
    newCols = [
        'deblend_nChild', 'base_ClassificationExtendedness_value',
        'ext_photometryKron_KronFlux_instFlux',
        'ext_photometryKron_KronFlux_instFluxErr', 'base_PsfFlux_instFlux',
        'base_PsfFlux_instFluxErr', 'ext_photometryKron_KronFlux_apCorr',
        'ext_photometryKron_KronFlux_apCorrErr', 'base_PsfFlux_apCorr',
        'base_PsfFlux_apCorrErr', 'modelfit_CModel_instFlux',
        'modelfit_CModel_instFluxErr', 'modelfit_CModel_apCorr',
        'modelfit_CModel_apCorrErr', 'modelfit_CModel_exp_instFlux',
        'modelfit_CModel_exp_instFluxErr', 'modelfit_CModel_exp_apCorr',
        'modelfit_CModel_exp_apCorrErr', 'modelfit_CModel_dev_instFlux',
        'modelfit_CModel_dev_instFluxErr', 'modelfit_CModel_dev_apCorr',
        'modelfit_CModel_dev_apCorrErr', 'modelfit_CModel_fracDev'
    ]
    measAlias = meas.schema.getAliasMap()
    newAlias = newSchema.getAliasMap()
    for aliasKey in measAlias.keys():
        newAlias.set(aliasKey, measAlias[aliasKey])
    combSrc = SourceCatalog(newSchema)
    combSrc.extend(meas, mapper=mapper)

    for key in newCols:
        combSrc['force_' + key][:] = force[key][:]

    return combSrc
Example #8
def getGalaxy(rootdir, visit, ccd, tol):
    """Get list of sources which agree in position with fake ones with tol
    """
    # Call the butler
    butler = dafPersist.Butler(rootdir)
    dataId = {'visit': visit, 'ccd': ccd}
    tol = float(tol)

    # Get the source catalog and metadata
    sources = butler.get('src', dataId)
    cal_md = butler.get('calexp_md', dataId)

    # Get the X, Y locations of objects on the CCD
    srcX, srcY = sources.getX(), sources.getY()
    # Get the zeropoint
    zeropoint = (2.5 * np.log10(cal_md.getScalar("FLUXMAG0")))
    # Get the parent ID
    parentID = sources.get('parent')
    # Check the star/galaxy separation
    extendClass = sources.get('classification.extendedness')
    # Get the nChild
    nChild = sources.get('deblend.nchild')

    # For Galaxies: Get these parameters
    # 1. Get the Kron flux and its error
    fluxKron, ferrKron = sources.get('flux.kron'), sources.get('flux.kron.err')
    magKron = (zeropoint - 2.5 * np.log10(fluxKron))
    merrKron = (2.5 / np.log(10) * (ferrKron / fluxKron))
    # X, Y locations of the fake galaxies
    fakeList = collections.defaultdict(tuple)
    # Regular Expression
    # Search for keywords like FAKE12
    fakename = re.compile('FAKE([0-9]+)')
    # Go through all the keywords
    counts = 0
    for card in cal_md.names():
        # To see if the card matches the pattern
        m = fakename.match(card)
        if m is not None:
            # Get the X,Y location for fake object
            x, y = list(map(float, (cal_md.getScalar(card)).split(',')))
            # Get the ID or index of the fake object
            fakeID = int(m.group(1))
            fakeList[counts] = [fakeID, x, y]
            counts += 1

    # Match the fake object to the source list
    srcIndex = collections.defaultdict(list)
    for fid, fcoord in fakeList.items():
        separation = np.sqrt(np.abs(srcX-fcoord[1])**2 +
                             np.abs(srcY-fcoord[2])**2)
        matched = (separation <= tol)
        matchId = np.where(matched)[0]
        matchSp = separation[matchId]
        sortId = [sid for (_, sid) in sorted(zip(matchSp, matchId))]
        # DEBUG:
        # print fid, fcoord, matchId
        # print sortId, sorted(matchSp), matchId
        # Select the index of all matched object
        srcIndex[fid] = sortId

    # Return the source list
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]))

    # Return a list of interesting parameters
    srcParam = []
    nFake = 0
    for matchIndex in srcIndex.values():
        # Check if there is a match
        if len(matchIndex) > 0:
            # Only select the one with the smallest separation
            ss = matchIndex[0]
            fakeObj = fakeList[nFake]
            diffX = srcX[ss] - fakeObj[1]
            diffY = srcY[ss] - fakeObj[2]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         magKron[ss], merrKron[ss], diffX, diffY,
                         parentID[ss], nChild[ss], extendClass[ss])
            srcParam.append(paramList)
        else:
            fakeObj = fakeList[nFake]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         0, 0, -1, -1, -1, -1, -1)
            srcParam.append(paramList)
        # Go to another fake object
        nFake += 1

    # Make a numpy record array
    srcParam = np.array(srcParam, dtype=[('fakeID', int),
                                         ('fakeX', float),
                                         ('fakeY', float),
                                         ('magKron', float),
                                         ('errKron', float),
                                         ('diffX', float),
                                         ('diffY', float),
                                         ('parentID', int),
                                         ('nChild', int),
                                         ('extendClass', float)])

    return srcIndex, srcParam, srcList, zeropoint
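
A hypothetical call, with placeholder rerun path, visit, and CCD; since srcParam is a NumPy record array, unmatched fakes (filled with sentinel values) can be filtered out by field:

srcIndex, srcParam, srcList, zeropoint = getGalaxy(
    '/path/to/rerun', 1234, 49, tol=2.0)           # placeholders
matched = srcParam[srcParam['parentID'] != -1]     # -1 marks unmatched fakes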
Example #9
def getGalaxy(rootdir, visit, ccd, tol):
    """Get list of sources which agree in position with fake ones with tol
    """
    # Call the butler
    butler = dafPersist.Butler(rootdir)
    dataId = {'visit': visit, 'ccd': ccd}
    tol = float(tol)

    # Get the source catalog and metadata
    sources = butler.get('src', dataId)
    cal_md = butler.get('calexp_md', dataId)

    # Get the X, Y locations of objects on the CCD
    srcX, srcY = sources.getX(), sources.getY()
    # Get the zeropoint
    zeropoint = (2.5 * np.log10(cal_md.get("FLUXMAG0")))
    # Get the parent ID
    parentID = sources.get('parent')
    # Check the star/galaxy separation
    extendClass = sources.get('classification.extendedness')

    # For Galaxies: Get these parameters
    # 1. Get the Kron flux and its error
    fluxKron, ferrKron = sources.get('flux.kron'), sources.get('flux.kron.err')
    magKron = zeropoint - 2.5*np.log10(fluxKron)
    merrKron = 2.5/np.log(10)*(ferrKron/fluxKron)
    # 2. Get the CModel flux and its error
    fluxCmod, ferrCmod = sources.get('cmodel.flux'), sources.get('cmodel.flux.err')
    magCmod = zeropoint - 2.5*np.log10(fluxCmod)
    merrCmod = 2.5/np.log(10)*(ferrCmod/fluxCmod)
    # 3. Get the Exponential flux and its error
    fluxExp, ferrExp = sources.get('cmodel.exp.flux'), sources.get('cmodel.exp.flux.err')
    magExp = zeropoint - 2.5*np.log10(fluxExp)
    merrExp = 2.5/np.log(10)*(ferrExp/fluxExp)
    # 4. Get the de Vaucouleurs flux and its error
    fluxDev, ferrDev = sources.get('cmodel.dev.flux'), sources.get('cmodel.dev.flux.err')
    magDev = zeropoint - 2.5*np.log10(fluxDev)
    merrDev = 2.5/np.log(10)*(ferrDev/fluxDev)
    # 5. Get the SDSS shapes (Re, b/a, PA)
    sdssMoment = sources.get('shape.sdss')
    sdssR, sdssBa, sdssPa = getSizeAndShape(sdssMoment)
    # 6. Get the Exponential shapes (Re, b/a, PA)
    expMoment = sources.get('cmodel.exp.ellipse')
    expR, expBa, expPa = getSizeAndShape(expMoment)
    # 7. Get the de Vaucouleurs shapes (Re, b/a, PA)
    devMoment = sources.get('cmodel.dev.ellipse')
    devR, devBa, devPa = getSizeAndShape(devMoment)
    # 8. Get the fracDev
    fracDev = sources.get('cmodel.fracDev')

    # X, Y locations of the fake stars
    fakeList = collections.defaultdict(tuple)
    # Regular Expression
    # Search for keywords like FAKE12
    fakename = re.compile('FAKE([0-9]+)')
    # Go through all the keywords
    counts = 0
    for card in cal_md.names():
        # To see if the card matches the pattern
        m = fakename.match(card)
        if m is not None:
            # Get the X,Y location for fake object
            x, y = map(float, (cal_md.get(card)).split(','))
            # Get the ID or index of the fake object
            fakeID = int(m.group(1))
            fakeList[counts] = [fakeID, x, y]
            counts += 1

    # Match the fake object to the source list
    srcIndex = collections.defaultdict(list)
    for fid, fcoord in fakeList.items():
        separation = np.sqrt(np.abs(srcX-fcoord[1])**2 +
                             np.abs(srcY-fcoord[2])**2)
        matched = (separation <= tol)
        matchId = np.where(matched)[0]
        matchSp = separation[matchId]
        sortId = [sid for (_, sid) in sorted(zip(matchSp, matchId))]
        # DEBUG:
        # print(fid, fcoord, matchId)
        print(sortId, sorted(matchSp), matchId)
        # Select the index of all matched object
        srcIndex[fid] = sortId

    # Return the source list
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]))

    # Return a list of interesting parameters
    srcParam = []
    nFake = 0
    for matchIndex in srcIndex.values():
        # Check if there is a match
        if len(matchIndex) > 0:
            # Only select the one with the smallest separation
            ss = matchIndex[0]
            fakeObj = fakeList[nFake]
            diffX = srcX[ss] - fakeObj[1]
            diffY = srcY[ss] - fakeObj[2]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         magKron[ss], merrKron[ss], magCmod[ss], merrCmod[ss],
                         magExp[ss], merrExp[ss], magDev[ss], merrDev[ss],
                         sdssR[ss], sdssBa[ss], sdssPa[ss],
                         expR[ss], expBa[ss], expPa[ss],
                         devR[ss], devBa[ss], devPa[ss],
                         diffX, diffY, fracDev[ss],
                         parentID[ss], extendClass[ss])
            srcParam.append(paramList)
        else:
            fakeObj = fakeList[nFake]
            # One placeholder value per field of the dtype below
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         0, 0, 0, 0, 0, 0, 0, 0,
                         0, 0, 0, 0, 0, 0, 0, 0, 0,
                         -1, -1, 0, -1, -1)
            srcParam.append(paramList)
        # Go to another fake object
        nFake += 1

    # Make a numpy record array
    srcParam = np.array(srcParam, dtype=[('fakeID', int),
                                         ('fakeX', float),
                                         ('fakeY', float),
                                         ('magKron', float),
                                         ('errKron', float),
                                         ('magCmod', float),
                                         ('errCmod', float),
                                         ('magExp', float),
                                         ('errExp', float),
                                         ('magDev', float),
                                         ('errDev', float),
                                         ('sdssR', float),
                                         ('sdssBa', float),
                                         ('sdssPa', float),
                                         ('expR', float),
                                         ('expBa', float),
                                         ('expPa', float),
                                         ('devR', float),
                                         ('devBa', float),
                                         ('devPa', float),
                                         ('diffX', float),
                                         ('diffY', float),
                                         ('fracDev', float),
                                         ('parentID', int),
                                         ('extendClass', float)])

    return srcIndex, srcParam, srcList, zeropoint
Example #10
def getFakeSources(
    butler,
    dataId,
    tol=1.0,
    extraCols=("zeropoint", "visit", "ccd"),
    includeMissing=False,
    footprints=False,
    radecMatch=None,
    multiband=False,
    reffMatch=False,
    pix=0.168,
    minRad=None,
    raCol="RA",
    decCol="Dec",
):
    """
    Get list of sources which agree in pixel position with fake ones with tol.

    This returns a SourceCatalog of all the matched fake objects. Note
    that there may be duplicates in this list, since deblend.nchild is
    not checked and the match is a tolerance match only, which can pick
    up extra sources.

    The outputs can include extraCols as long as they are one of:
        zeropoint, visit, ccd, thetaNorth, pixelScale

    If includeMissing is True, the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources
    without any measurements; specifically, the 'id' column will be 0.

    radecMatch is the fakes table. If it is not None (the default is
    None), an RA/Dec match is done with the input catalog instead of
    looking in the header for where the sources were added.
    """
    pipeVersion = dafPersist.eupsVersions.EupsVersions().versions["hscPipe"]
    if StrictVersion(pipeVersion) >= StrictVersion("3.9.0"):
        coaddData = "deepCoadd_calexp"
        coaddMeta = "deepCoadd_calexp_md"
    else:
        coaddData = "deepCoadd"
        coaddMeta = "deepCoadd_md"

    availExtras = {
        "zeropoint": {"type": float, "doc": "zeropoint"},
        "visit": {"type": int, "doc": "visit id"},
        "ccd": {"type": int, "doc": "ccd id"},
        "thetaNorth": {"type": lsst.afw.geom.Angle, "doc": "angle to north"},
        "pixelScale": {"type": float, "doc": "pixelscale in arcsec/pixel"},
    }

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if "filter" not in dataId:
            sources = butler.get("src", dataId, flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS, immediate=True)
            cal = butler.get("calexp", dataId, immediate=True)
            cal_md = butler.get("calexp_md", dataId, immediate=True)
        else:
            meas = butler.get("deepCoadd_meas", dataId, flags=NO_FOOTPRINT, immediate=True)
            force = butler.get("deepCoadd_forced_src", dataId, flags=NO_FOOTPRINT, immediate=True)
            sources = combineWithForce(meas, force)
            cal = butler.get(coaddData, dataId, immediate=True)
            cal_md = butler.get(coaddMeta, dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print "skipping", dataId
        return None

    if ("pixelScale" in extraCols) or ("thetaNorth" in extraCols):
        wcs = cal.getWcs()
        availExtras["pixelScale"]["value"] = wcs.pixelScale().asArcseconds()
        availExtras["thetaNorth"]["value"] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()(lsst.afw.geom.Point2D(1.0, 0.0))))
        )
    if "visit" in extraCols:
        availExtras["visit"]["value"] = dataId["visit"]
    if "ccd" in extraCols:
        availExtras["ccd"]["value"] = dataId["ccd"]
    if "zeropoint" in extraCols:
        zeropoint = 2.5 * np.log10(cal_md.get("FLUXMAG0"))
        availExtras["zeropoint"]["value"] = zeropoint

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        if minRad is not None:
            print "# The min matching radius is %4.1f pixel" % minRad
        bbox = lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT))
        fakeXY, srcIndex, srcClose = getFakeMatchesRaDec(
            sources,
            radecMatch,
            bbox,
            cal.getWcs(),
            tol=tol,
            reffMatch=reffMatch,
            pix=pix,
            minRad=minRad,
            raCol=raCol,
            decCol=decCol,
        )

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField("fakeId", type=int, doc="id of fake source matched to position")
    newSchema.addField("nMatched", type=int, doc="Number of matched objects")
    newSchema.addField("nPrimary", type=int, doc="Number of unique matched objects")
    newSchema.addField("nNoChild", type=int, doc="Number of matched objects with nchild==0")
    newSchema.addField("rMatched", type=float, doc="Radius used form atching obects, in pixel")
    newSchema.addField("fakeOffX", type=float, doc="offset from input fake position in X (pixels)")
    newSchema.addField("fakeOffY", type=float, doc="offset from input fake position in Y (pixels)")
    newSchema.addField("fakeOffR", type=float, doc="offset from input fake position in radius")
    newSchema.addField("fakeClosest", type="Flag", doc="Is this match the closest one?")

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]["type"], doc=availExtras[extraName]["doc"])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(
        sum([len(s) for s in srcIndex.values()]) +
        (0 if not includeMissing else list(srcIndex.values()).count([]))
    )

    centroidKey = sources.schema.find("centroid.sdss").getKey()
    isPrimary = sources.schema.find("detect.is-primary").getKey()
    nChild = sources.schema.find("force.deblend.nchild").getKey()
    for ident, sindlist in srcIndex.items():
        rMatched = fakeXY[ident][2]
        if minRad is not None:
            if rMatched < minRad:
                rMatched = minRad
        nMatched = len(sindlist)
        nPrimary = np.sum([sources[obj].get(isPrimary) for obj in sindlist])
        nNoChild = np.sum([(sources[obj].get(nChild) == 0) for obj in sindlist])
        if includeMissing and (nMatched == 0):
            newRec = srcList.addNew()
            newRec.set("fakeId", ident)
            newRec.set("id", 0)
            newRec.set("nMatched", 0)
            newRec.set("rMatched", rMatched)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set("fakeId", ident)
            newRec.set("nMatched", nMatched)
            newRec.set("nPrimary", nPrimary)
            newRec.set("nNoChild", nNoChild)
            newRec.set("rMatched", rMatched)
            offsetX = sources[ss].get(centroidKey).getX() - fakeXY[ident][0]
            newRec.set("fakeOffX", offsetX)
            offsetY = sources[ss].get(centroidKey).getY() - fakeXY[ident][1]
            newRec.set("fakeOffY", offsetY)
            newRec.set("fakeOffR", np.sqrt(offsetX ** 2.0 + offsetY ** 2.0))
            if radecMatch:
                if ss == srcClose[ident]:
                    newRec.set("fakeClosest", True)
                else:
                    newRec.set("fakeClosest", False)

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]["value"])

    return srcList
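
A hypothetical invocation, reusing the module's dafPersist import and an hscPipe-era Gen2 repository; the path and data ID are placeholders:

butler = dafPersist.Butler('/path/to/rerun')       # placeholder repo
fakes = getFakeSources(butler, {'visit': 1234, 'ccd': 49},
                       tol=2.0, includeMissing=True)
if fakes is not None:
    print("matched %d fake-source rows" % len(fakes))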
Example #11
def getStars(rootdir, visit, ccd, tol):
    """Get list of sources which agree in position with fake ones with tol
    """
    # Call the butler
    butler = dafPersist.Butler(rootdir)
    dataId = {'visit': visit, 'ccd': ccd}
    tol = float(tol)

    # Get the source catalog and metadata
    sources = butler.get('src', dataId)
    cal_md = butler.get('calexp_md', dataId)

    # Get the X, Y locations of objects on the CCD
    srcX, srcY = sources.getX(), sources.getY()
    # Get the zeropoint
    zeropoint = (2.5 * np.log10(cal_md.get("FLUXMAG0")))
    # Get the parent ID
    parentID = sources.get('parent')
    # Check the star/galaxy separation
    extendClass = sources.get('classification.extendedness')
    # Get the nChild
    nChild = sources.get('deblend.nchild')
    # Get the aperture corrections
    # apcorr = sources.get('correctfluxes.apcorr')
    apcorr = sources.get('flux.sinc')

    # For Stars: Get these parameters
    # Get the PSF flux and its error
    flux, ferr = sources.getPsfFlux(), sources.getPsfFluxErr()
    # Convert them into magnitude and its error
    mag, merr = 2.5*np.log10(flux), 2.5/np.log(10)*(ferr/flux)
    mag = zeropoint - mag

    apcorr = zeropoint - 2.5*np.log10(apcorr)

    # X, Y locations of the fake stars
    fakeList = collections.defaultdict(tuple)
    # Regular Expression
    # Search for keywords like FAKE12
    fakename = re.compile('FAKE([0-9]+)')
    # Go through all the keywords
    counts = 0
    for card in cal_md.names():
        # To see if the card matches the pattern
        m = fakename.match(card)
        if m is not None:
            # Get the X,Y location for fake object
            x, y = map(float, (cal_md.get(card)).split(','))
            # Get the ID or index of the fake object
            fakeID = int(m.group(1))
            fakeList[counts] = [fakeID, x, y]
            counts += 1

    # Match the fake object to the source list
    srcIndex = collections.defaultdict(list)
    for fid, fcoord in fakeList.items():
        separation = np.sqrt(np.abs(srcX-fcoord[1])**2 +
                             np.abs(srcY-fcoord[2])**2)
        matched = (separation <= tol)
        matchId = np.where(matched)[0]
        matchSp = separation[matchId]
        sortId = [sid for (_, sid) in sorted(zip(matchSp, matchId))]
        # DEBUG:
        # print fid, fcoord, matchId
        # print sortId, sorted(matchSp), matchId
        # Select the index of all matched object
        srcIndex[fid] = sortId

    # Return the source list
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]))

    # Return a list of interesting parameters
    srcParam = []
    nFake = 0
    for matchIndex in srcIndex.values():
        # Check if there is a match
        if len(matchIndex) > 0:
            # Only select the one with the smallest separation
            ss = matchIndex[0]
            fakeObj = fakeList[nFake]
            diffX = srcX[ss] - fakeObj[1]
            diffY = srcY[ss] - fakeObj[2]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         mag[ss], merr[ss], apcorr[ss], diffX, diffY,
                         parentID[ss], nChild[ss], extendClass[ss])
            srcParam.append(paramList)
        else:
            fakeObj = fakeList[nFake]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         0, 0, -1, -1, -1, -1, -1, -1)
            srcParam.append(paramList)
        # Go to another fake object
        nFake += 1

    # Make a numpy record array
    srcParam = np.array(srcParam, dtype=[('fakeID', int),
                                         ('fakeX', float),
                                         ('fakeY', float),
                                         ('psfMag', float),
                                         ('psfMagErr', float),
                                         ('apCorr', float),
                                         ('diffX', float),
                                         ('diffY', float),
                                         ('parentID', int),
                                         ('nChild', int),
                                         ('extendClass', float)])

    return srcIndex, srcParam, srcList, zeropoint
Example #12
    def _loadAndMatchCatalogs(self, repo, dataIds, matchRadius):
        """Load data from specific visit. Match with reference.

        Parameters
        ----------
        repo : string
            The repository.  This is generally the directory on disk
            that contains the repository and mapper.
        dataIds : list of dict
            List of `butler` data IDs of Image catalogs to compare to
            reference. The `calexp` pixel image is needed for the photometric
            calibration.
        matchRadius :  afwGeom.Angle(), optional
            Radius for matching. Default is 1 arcsecond.

        Returns
        -------
        afw.table.GroupView
            An object of matched catalog.
        """
        # Following
        # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
        butler = dafPersist.Butler(repo)
        dataset = 'src'

        # 2016-02-08 MWV:
        # I feel like I could be doing something more efficient with
        # something along the lines of the following:
        #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

        ccdKeyName = getCcdKeyName(dataIds[0])

        schema = butler.get(dataset + "_schema", immediate=True).schema
        mapper = SchemaMapper(schema)
        mapper.addMinimalSchema(schema)
        mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
        mapper.addOutputField(Field[float]('base_PsfFlux_mag',
                                           'PSF magnitude'))
        mapper.addOutputField(Field[float]('base_PsfFlux_magerr',
                                           'PSF magnitude uncertainty'))
        newSchema = mapper.getOutputSchema()

        # Create an object that matches multiple catalogs with same schema
        mmatch = MultiMatch(newSchema,
                            dataIdFormat={
                                'visit': np.int32,
                                ccdKeyName: np.int32
                            },
                            radius=matchRadius,
                            RecordClass=SimpleRecord)

        # create the new extended source catalog
        srcVis = SourceCatalog(newSchema)

        for vId in dataIds:
            try:
                calexpMetadata = butler.get("calexp_md", vId, immediate=True)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open calibrated image file for ", vId)
                print("Skipping %s " % repr(vId))
                continue
            except TypeError as te:
                # DECam images that haven't been properly reformatted
                # can trigger a TypeError because of a residual FITS header
                # LTV2 which is a float instead of the expected integer.
                # This generates an error of the form:
                #
                # lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                #
                # See, e.g., DM-2957 for details.
                print(te)
                print("Calibration image header information malformed.")
                print("Skipping %s " % repr(vId))
                continue

            calib = afwImage.Calib(calexpMetadata)

            oldSrc = butler.get('src', vId, immediate=True)
            print(
                len(oldSrc), "sources in ccd %s  visit %s" %
                (vId[ccdKeyName], vId["visit"]))

            # create temporary catalog
            tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
            tmpCat.extend(oldSrc, mapper=mapper)
            tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] \
                / tmpCat['base_PsfFlux_fluxSigma']
            with afwImageUtils.CalibNoThrow():
                mag, magErr = calib.getMagnitude(
                    tmpCat['base_PsfFlux_flux'],
                    tmpCat['base_PsfFlux_fluxSigma'])
                tmpCat['base_PsfFlux_mag'][:] = mag
                tmpCat['base_PsfFlux_magerr'][:] = magErr

            srcVis.extend(tmpCat, False)
            mmatch.add(catalog=tmpCat, dataId=vId)

        # Complete the match, returning a catalog that includes
        # all matched sources with object IDs that can be used to group them.
        matchCat = mmatch.finish()

        # Create a mapping object that allows the matches to be manipulated
        # as a mapping of object ID to catalog of sources.
        allMatches = GroupView.build(matchCat)

        return allMatches
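
The core of the routine above is MultiMatch: catalogs sharing one schema are added per data ID, then finish() returns a single match catalog that GroupView turns into per-object groups. A condensed sketch, assuming lsst.afw.table; the data IDs are placeholders and in practice each catalog would come from butler.get('src', vId):

import numpy as np
import lsst.afw.geom as afwGeom
from lsst.afw.table import GroupView, MultiMatch, SimpleRecord, SourceCatalog

schema = SourceCatalog.Table.makeMinimalSchema()
mmatch = MultiMatch(schema,
                    dataIdFormat={'visit': np.int32, 'ccd': np.int32},
                    radius=1.0*afwGeom.arcseconds,
                    RecordClass=SimpleRecord)

for vId in ({'visit': 1, 'ccd': 0}, {'visit': 2, 'ccd': 0}):  # placeholders
    cat = SourceCatalog(schema)   # in practice: butler.get('src', vId)
    mmatch.add(catalog=cat, dataId=vId)

allMatches = GroupView.build(mmatch.finish())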
Example #13
def _loadAndMatchCatalogs(repo,
                          dataIds,
                          matchRadius,
                          useJointCal=False,
                          skipTEx=False):
    """Load data from specific visit. Match with reference.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one
    dataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to
        reference. The `calexp` pixel image is needed for the photometric
        calibration.
    matchRadius :  afwGeom.Angle(), optional
        Radius for matching. Default is 1 arcsecond.
    useJointCal : `bool`, optional
        Use jointcal/meas_mosaic outputs to calibrate positions and fluxes.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).

    Returns
    -------
    catalog_list : afw.table.SourceCatalog
        List of all of the catalogs
    matched_catalog : afw.table.GroupView
        An object of matched catalog.
    """
    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Hack to support raft and sensor 0,1 IDs as ints for multimatch
    if ccdKeyName == 'sensor':
        ccdKeyName = 'raft_sensor_int'
        for vId in dataIds:
            vId[ccdKeyName] = raftSensorToInt(vId)

    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={
                            'visit': np.int32,
                            ccdKeyName: np.int32
                        },
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:

        if useJointCal:
            try:
                photoCalib = butler.get("jointcal_photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open photometric calibration for ", vId)
                print("Skipping this dataId.")
                continue
            try:
                wcs = butler.get("jointcal_wcs", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open updated WCS for ", vId)
                print("Skipping this dataId.")
                continue
        else:
            try:
                photoCalib = butler.get("calexp_photoCalib", vId)
            except (FitsError, dafPersist.NoResults) as e:
                print(e)
                print("Could not open calibrated image file for ", vId)
                print("Skipping this dataId.")
                continue
            except TypeError as te:
                # DECam images that haven't been properly reformatted
                # can trigger a TypeError because of a residual FITS header
                # LTV2 which is a float instead of the expected integer.
                # This generates an error of the form:
                #
                # lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                #
                # See, e.g., DM-2957 for details.
                print(te)
                print("Calibration image header information malformed.")
                print("Skipping this dataId.")
                continue

        # We don't want to put this above the first "if useJointCal block"
        # because we need to use the first `butler.get` above to quickly
        # catch data IDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except (OperationalError, sqlite3.OperationalError):
            oldSrc = butler.get('src', vId)

        print(len(oldSrc),
              "sources in ccd %s  visit %s" % (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if useJointCal:
            for record in tmpCat:
                record.updateCoord(wcs)
        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")

        if not skipTEx:
            _, psf_e1, psf_e2 = ellipticity_from_cat(
                oldSrc, slot_shape='slot_PsfShape')
            _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                       slot_shape='slot_Shape')
            tmpCat['e1'][:] = star_e1
            tmpCat['e2'][:] = star_e2
            tmpCat['psf_e1'][:] = psf_e1
            tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return srcVis, allMatches
Example #14
    def _loadAndMatchCatalogs(self,
                              repo,
                              dataIds,
                              matchRadius,
                              useJointCal=False):
        """Load data from specific visit. Match with reference.

        Parameters
        ----------
        repo : string or Butler
            A Butler or a repository URL that can be used to construct one
        dataIds : list of dict
            List of `butler` data IDs of Image catalogs to compare to
            reference. The `calexp` pixel image is needed for the photometric
            calibration.
        matchRadius :  afwGeom.Angle(), optional
            Radius for matching. Default is 1 arcsecond.

        Returns
        -------
        afw.table.GroupView
            An object of matched catalog.
        """
        # Following
        # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
        if isinstance(repo, dafPersist.Butler):
            butler = repo
        else:
            butler = dafPersist.Butler(repo)
        dataset = 'src'

        # 2016-02-08 MWV:
        # I feel like I could be doing something more efficient with
        # something along the lines of the following:
        #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

        ccdKeyName = getCcdKeyName(dataIds[0])

        schema = butler.get(dataset + "_schema").schema
        mapper = SchemaMapper(schema)
        mapper.addMinimalSchema(schema)
        mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
        mapper.addOutputField(Field[float]('base_PsfFlux_mag',
                                           'PSF magnitude'))
        mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                           'PSF magnitude uncertainty'))
        newSchema = mapper.getOutputSchema()
        newSchema.setAliasMap(schema.getAliasMap())

        # Create an object that matches multiple catalogs with same schema
        mmatch = MultiMatch(newSchema,
                            dataIdFormat={
                                'visit': np.int32,
                                ccdKeyName: np.int32
                            },
                            radius=matchRadius,
                            RecordClass=SimpleRecord)

        # create the new extended source catalog
        srcVis = SourceCatalog(newSchema)

        for vId in dataIds:

            if useJointCal:
                try:
                    photoCalib = butler.get("photoCalib", vId)
                except (FitsError, dafPersist.NoResults) as e:
                    print(e)
                    print("Could not open photometric calibration for ", vId)
                    print("Skipping %s " % repr(vId))
                    continue
                try:
                    md = butler.get("wcs_md", vId)
                    wcs = afwImage.makeWcs(md)
                except (FitsError, dafPersist.NoResults) as e:
                    print(e)
                    print("Could not open updated WCS for ", vId)
                    print("Skipping %s " % repr(vId))
                    continue
            else:
                try:
                    calexpMetadata = butler.get("calexp_md", vId)
                except (FitsError, dafPersist.NoResults) as e:
                    print(e)
                    print("Could not open calibrated image file for ", vId)
                    print("Skipping %s " % repr(vId))
                    continue
                except TypeError as te:
                    # DECam images that haven't been properly reformatted
                    # can trigger a TypeError because of a residual FITS header
                    # LTV2 which is a float instead of the expected integer.
                    # This generates an error of the form:
                    #
                    # lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
                    #
                    # See, e.g., DM-2957 for details.
                    print(te)
                    print("Calibration image header information malformed.")
                    print("Skipping %s " % repr(vId))
                    continue

                calib = afwImage.Calib(calexpMetadata)

            # We don't want to put this above the first "if useJointCal block"
            # because we need to use the first `butler.get` above to quickly
            # catch data IDs with no usable outputs.
            try:
                # HSC supports these flags, which dramatically improve I/O
                # performance; support for other cameras is DM-6927.
                oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
            except Exception:
                oldSrc = butler.get('src', vId)
            print(
                len(oldSrc), "sources in ccd %s  visit %s" %
                (vId[ccdKeyName], vId["visit"]))

            # create temporary catalog
            tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
            tmpCat.extend(oldSrc, mapper=mapper)
            tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] \
                / tmpCat['base_PsfFlux_fluxSigma']

            if useJointCal:
                for record in tmpCat:
                    record.updateCoord(wcs)
                photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux",
                                               "base_PsfFlux")
            else:
                with afwImageUtils.CalibNoThrow():
                    mag, magErr = calib.getMagnitude(
                        tmpCat['base_PsfFlux_flux'],
                        tmpCat['base_PsfFlux_fluxSigma'])
                    tmpCat['base_PsfFlux_mag'][:] = mag
                    tmpCat['base_PsfFlux_magErr'][:] = magErr

            srcVis.extend(tmpCat, False)
            mmatch.add(catalog=tmpCat, dataId=vId)

        # Complete the match, returning a catalog that includes
        # all matched sources with object IDs that can be used to group them.
        matchCat = mmatch.finish()

        # Create a mapping object that allows the matches to be manipulated
        # as a mapping of object ID to catalog of sources.
        allMatches = GroupView.build(matchCat)

        return allMatches
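The GroupView returned above behaves as a mapping from object ID to the catalog of per-visit sources for that object. Below is a minimal sketch of how it might be filtered and aggregated; the predicate and the aggregated field are illustrative assumptions, not part of the function above:

import numpy as np

def summarize_matches(allMatches):
    # Keep only groups in which every matched source has a finite PSF
    # magnitude (illustrative selection; adapt to the fields you need).
    def goodFilter(cat):
        return np.isfinite(cat.get('base_PsfFlux_mag')).all()

    goodMatches = allMatches.where(goodFilter)

    # Aggregate a per-object statistic over the visits in each group,
    # here the scatter of the repeated PSF-magnitude measurements.
    return goodMatches.aggregate(np.std, field='base_PsfFlux_mag')
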
class ProcessCoaddsMetacalMaxTask(ProcessCoaddsNGMixBaseTask):
    _DefaultName = "processCoaddsMetacalMax"
    ConfigClass = ProcessCoaddsMetacalMaxConfig

    def defineSchema(self, refSchema):
        """Return the Schema for the output catalog.

        This method may add attributes to or otherwise modify ``self``.

        Parameters
        ----------
        refSchema : `lsst.afw.table.Schema`
            Schema of the input reference catalogs.

        Returns
        -------
        outputSchema : `lsst.afw.table.Schema`
            Schema of the output catalog.  Will be added as ``self.schema``
            by calling code.
        """
        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
        schema = self.mapper.getOutputSchema()

        config=self.cdict

        model=config['obj']['model']
        n=self.get_namer()
        pn=self.get_psf_namer()

        # generic ngmix fields
        mtypes=[
            (n('flags'),'overall flags for the processing',np.int32,''),
            (n('stamp_size'),'size of postage stamp',np.int32,''),
            (n('maskfrac'),'mean masked fraction',np.float32,''),
        ]
        for filt in config['filters']:
            mtypes += [
                (n('maskfrac_%s' % filt),'masked fraction in %s filter' % filt,np.float32,''),
            ]


        # psf fitting related fields
        mtypes += [
            (pn('flags'),'overall flags for the PSF processing',np.int32,''),

            # mean over filters
            (pn('g2_mean'),'mean over filters of component 2 of the PSF ellipticity',np.float64,''),
            (pn('g1_mean'),'mean over filters of component 1 of the PSF ellipticity',np.float64,''),
            (pn('T_mean'),'mean over filters <x^2> + <y^2> for the gaussian mixture',np.float64,'arcsec^2'),
        ]

        # PSF measurements by filter
        for filt in config['filters']:
            pfn=self.get_psf_namer(filt=filt)
            mtypes += [
                (pfn('flags'), 'overall flags for PSF processing in %s filter' % filt, np.int32, ''),
                (pfn('row'),'offset from canonical row position',np.float64,'arcsec'),
                (pfn('col'),'offset from canonical col position',np.float64,'arcsec'),
                (pfn('g1'), 'component 1 of the PSF ellipticity in %s filter' % filt, np.float64, ''),
                (pfn('g2'), 'component 2 of the PSF ellipticity in %s filter' % filt, np.float64, ''),
                (pfn('T'), '<x^2> + <y^2> for the PSF in %s filter' % filt, np.float64, 'arcsec^2'),
            ]

        # PSF flux measurements, on the object, by filter
        #for filt in config['filters']:
        #    pfn=self.get_psf_flux_namer(filt)
        #    mtypes += [
        #        (pfn('flux_flags'),'flags for PSF template flux fitting in the %s filter' % filt,np.float64,''),
        #        (pfn('flux'),'PSF template flux in the %s filter' % filt,np.float64,''),
        #        (pfn('flux_err'),'error on PSF template flux in the %s filter' % filt,np.float64,''),
        #    ]

        # object fitting related fields
        for type in config['metacal']['types']:
            mn=self.get_model_namer(type=type)
            mtypes += [
                (mn('flags'),'flags for model fit',np.int32,''),
                (mn('nfev'),'number of function evaluations during fit',np.int32,''),
                (mn('chi2per'),'chi^2 per degree of freedom',np.float64,''),
                (mn('dof'),'number of degrees of freedom',np.int32,''),

                (mn('s2n'),'S/N for the fit',np.float64,''),

                (mn('row'),'offset from canonical row position',np.float64,'arcsec'),
                (mn('row_err'),'error on offset from canonical row position',np.float64,'arcsec'),
                (mn('col'),'offset from canonical col position',np.float64,'arcsec'),
                (mn('col_err'),'error on offset from canonical col position',np.float64,'arcsec'),
                (mn('g1'),'component 1 of the ellipticity',np.float64,''),
                (mn('g1_err'),'error on component 1 of the ellipticity',np.float64,''),
                (mn('g2'),'component 2 of the ellipticity',np.float64,''),
                (mn('g2_err'),'error on component 2 of the ellipticity',np.float64,''),
                (mn('T'),'<x^2> + <y^2> for the gaussian mixture',np.float64,'arcsec^2'),
                (mn('T_err'),'error on <x^2> + <y^2> for the gaussian mixture',np.float64,'arcsec^2'),
            ]
            if model in ['bd','bdf']:
                mtypes += [
                    (mn('fracdev'),'fraction of light in the bulge',np.float64,''),
                    (mn('fracdev_err'),'error on fraction of light in the bulge',np.float64,''),
                ]

            for filt in config['filters']:
                mfn=self.get_model_flux_namer(filt, type=type)
                mtypes += [
                    (mfn('flux'),'flux in the %s filter' % filt,np.float64,''),
                    (mfn('flux_err'),'error on flux in the %s filter' % filt,np.float64,''),
                ]

        for name,doc,dtype,units in mtypes:
            schema.addField(
                name,
                type=dtype,
                doc=doc,
                units=units,
            )

        return schema

    def _process_observations(self, id, mbobs):
        """
        process the input observations

        Parameters
        ----------
        id: int
            ID of this observation
        mbobs: ngmix.MultiBandObsList
            ngmix multi-band observation.  We may loosen this to be a list
            of them, for deblending.

        Returns
        -------
        results : `dict`
            Dictionary of outputs, with keys matching the fields added in
            `defineSchema()`.
        """

        if self.cdict['make_plots']:
            self._make_plots(id, mbobs)

        # Start with a default result; it may not be used if we get as far
        # as the measurements.
        maskfrac, maskfrac_byband = self._get_masked_fraction(mbobs)

        flags = self._check_obs(mbobs, maskfrac_byband)
        if flags != 0:
            res=self._get_default_result()
            res['maskfrac'], res['maskfrac_byband'] = maskfrac, maskfrac_byband
            res['flags'] = flags
            return res

        boot=self._get_bootstrapper(mbobs)
        boot.go()
        res=boot.result
        res['maskfrac'], res['maskfrac_byband'] = maskfrac, maskfrac_byband
        return res

    def _get_bootstrapper(self, mbobs):
        """
        get a bootstrapper to automate the processing
        """
        return bootstrap.MetacalMaxBootstrapper(
            mbobs,
            self.cdict,
            self.prior,
            self.rng,
        )

    def _get_default_result(self):
        """
        get the default result dict
        """
        return bootstrap.get_default_mcal_result()

    def _copy_result(self, mbobs, res, output):
        """
        copy the result dict to the output record
        """

        n=self.get_namer()
        stamp_shape = mbobs[0][0].image.shape
        stamp_size=stamp_shape[0]

        output[n('flags')] = res['mcal_flags']
        output[n('stamp_size')] = stamp_size
        output[n('maskfrac')] = res['maskfrac']
        for ifilt,filt in enumerate(self.cdict['filters']):
            output[n('maskfrac_%s' % filt)] = res['maskfrac_byband'][ifilt]

        self._copy_psf_fit_result(res['noshear']['psf'], output)
        self._copy_psf_fit_results_byband(res['noshear']['psf'], output)

        #self._copy_psf_flux_results_byband(res['psf_flux'], output)

        self._copy_model_result(res, output)

    def _copy_psf_fit_result(self, pres, output):
        """
        copy the PSF result dict to the output record.
        The statistics here are averaged over all bands
        """

        n=self.get_psf_namer()
        output[n('flags')] = pres['flags']
        if pres['flags'] == 0:
            output[n('g1_mean')] = pres['g1_mean']
            output[n('g2_mean')] = pres['g2_mean']
            output[n('T_mean')]  = pres['T_mean']

    def _copy_psf_fit_results_byband(self, pres, output):
        """
        copy the PSF result from each band to the output record.
        """

        if len(pres['byband'])==0:
            return

        config=self.cdict
        for ifilt,filt in enumerate(config['filters']):
            filt_res = pres['byband'][ifilt]

            if filt_res is not None:
                n=self.get_psf_namer(filt=filt)

                output[n('flags')] = filt_res['flags']
                if filt_res['flags'] == 0:
                    output[n('row')] = filt_res['pars'][0]
                    output[n('col')] = filt_res['pars'][1]
                    for name in ['g1','g2','T']:
                        output[n(name)] = filt_res[name]

    def _copy_psf_flux_results_byband(self, pres, output):
        """
        copy the PSF flux fitting results from each band to the output record.
        """
        if len(pres['byband'])==0:
            return

        config=self.cdict
        for ifilt,filt in enumerate(config['filters']):
            filt_res = pres['byband'][ifilt]

            if filt_res is not None:
                n=self.get_psf_flux_namer(filt=filt)

                output[n('flux_flags')] = filt_res['flags']
                if filt_res['flags']==0:
                    output[n('flux')] = filt_res['flux']
                    output[n('flux_err')] = filt_res['flux_err']


    def _copy_model_result(self, res, output):
        """
        copy the model fitting result dict to the output record.
        """

        config=self.cdict

        types=config['metacal']['types']

        for type in types:
            ores=res[type]['obj']

            mn=self.get_model_namer(type=type)
            output[mn('flags')] = ores['flags']

            if 'nfev' in ores:
                # can be there even if the fit failed, but won't be there
                # if it wasn't attempted
                output[mn('nfev')] = ores['nfev']

            if ores['flags']==0:
                for n in ['chi2per','dof','s2n']:
                    output[mn(n)] = ores[n]

                ni=[('row',0),('col',1),('g1',2),('g2',3),('T',4)]
                if self.cdict['obj']['model'] in ['bd','bdf']:
                    ni += [('fracdev',5)]
                    flux_start=6
                else:
                    flux_start=5

                pars=ores['pars']
                perr=ores['pars_err']
                for n,i in ni:
                    output[mn(n)] = pars[i]
                    output[mn(n+'_err')] = perr[i]

                for ifilt, filt in enumerate(config['filters']):

                    ind=flux_start+ifilt
                    mfn=self.get_model_flux_namer(filt, type=type)
                    output[mfn('flux')] = pars[ind]
                    output[mfn('flux_err')] = perr[ind]

    def get_namer(self, type=None):
        """
        get a namer for this output type
        """
        front='mcal'

        back=None
        if type is not None and type!='noshear':
            back=type

        return Namer(front=front, back=back)

    def get_psf_namer(self, filt=None):
        """
        get a namer for this output type
        """
        front='mcal_psf'
        if filt is not None:
            front='%s_%s' % (front,filt)
        return Namer(front=front)

    def get_model_namer(self, type=None):
        """
        get a namer for this output type
        """
        config=self.cdict
        model=config['obj']['model']

        front='mcal_%s' % model

        back=None
        if type is not None and type!='noshear':
            back=type

        return Namer(front=front, back=back)

    def get_model_flux_namer(self, filt, type=None):
        """
        get a namer for this output type
        """
        config=self.cdict
        model=config['obj']['model']
        front='mcal_%s' % model
        back=filt

        if type is not None:
            if type!='noshear':
                back='%s_%s' % (back, type)

        return Namer(front=front, back=back)

    def get_psf_flux_namer(self, filt):
        """
        get a namer for this output type
        """
        raise NotImplementedError('make work for metacal')
        front='mcal_psf'
        return Namer(front=front, back=filt)


    @property
    def prior(self):
        """
        get the joint prior used for object fitting, lazily creating it on
        first access
        """
        if not hasattr(self, '_prior'):
            # this is temporary until I can figure out how to get
            # an existing seeded rng

            conf=self.cdict
            nband=len(conf['filters'])
            self._prior = priors.get_joint_prior(
                conf['obj'],
                nband,
                self.rng,
            )

        return self._prior
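The namer methods above build the output field names from a model/type prefix and an optional metacal suffix. Here is a minimal sketch of the behavior assumed of the ngmix-style Namer; this stand-in is illustrative, not the actual ngmix implementation:

class SketchNamer(object):
    # Illustrative stand-in: joins an optional front and back onto a
    # field name with underscores, skipping any part that is None.
    def __init__(self, front=None, back=None):
        self.front = front
        self.back = back

    def __call__(self, name):
        return '_'.join(p for p in (self.front, name, self.back) if p)

Under this assumption, get_model_namer(type='1p') for a 'gauss' model yields names such as 'mcal_gauss_g1_1p', while type='noshear' drops the suffix and yields 'mcal_gauss_g1'.
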
Exemplo n.º 17
0
def getFakeSources(butler, dataId, tol=1.0, extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False, footprints=False, radecMatch=None):
    """Get list of sources which agree in pixel position with fake ones with tol
    
    this returns a sourceCatalog of all the matched fake objects,
    note, there will be duplicates in this list, since I haven't checked deblend.nchild,
    and I'm only doing a tolerance match, which could include extra sources
    
    the outputs can include extraCols as long as they are one of:
      zeropoint, visit, ccd, thetaNorth, pixelScale

    if includeMissing is true, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources without
    any measurements, specifically the 'id' column will be 0

    radecMatch is the fakes table. if it's not None(default), then do an ra/dec 
    match with the input catalog instead of looking in the header for where the 
    sources where added
    """
    
    availExtras = {'zeropoint': {'type': float, 'doc': 'zeropoint'},
                   'visit': {'type': int, 'doc': 'visit id'},
                   'ccd': {'type': int, 'doc': 'ccd id'},
                   'thetaNorth': {'type': lsst.afw.geom.Angle, 'doc': 'angle to north'},
                   'pixelScale': {'type': float, 'doc': 'pixelscale in arcsec/pixel'}}

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src', dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            sources = butler.get('deepCoadd_src', dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('deepCoadd', dataId, immediate=True)
            cal_md = butler.get('deepCoadd_md', dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print("skipping", dataId)
        return None
        
    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.pixelScale().asArcseconds()
        availExtras['thetaNorth']['value'] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()
                              (lsst.afw.geom.Point2D(1.0,0.0)))))
    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        availExtras['zeropoint']['value'] = 2.5*np.log10(cal_md.get('FLUXMAG0'))

        
    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        fakeXY, srcIndex = getFakeMatchesRaDec(sources, radecMatch, 
                                               lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT)),
                                               cal.getWcs(), 
                                               tol=tol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int, doc='id of fake source matched to position')
    newSchema.addField('fakeOffset', type=lsst.afw.geom.Point2D,
                       doc='offset from input fake position (pixels)')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName, type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]) +
                    (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.schema.find('centroid.sdss').getKey()
    for ident, sindlist in srcIndex.items():
        if includeMissing and (len(sindlist)==0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set('fakeId', ident)
            newRec.set('fakeOffset', 
                       lsst.afw.geom.Point2D(sources[ss].get(centroidKey).getX() - 
                                             fakeXY[ident][0],
                                             sources[ss].get(centroidKey).getY() - 
                                             fakeXY[ident][1]))

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList
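A sketch of how getFakeSources might be invoked; the repository path and data ID below are placeholders, not values taken from this example:

import lsst.daf.persistence as dafPersist

butler = dafPersist.Butler('/path/to/rerun')  # placeholder repository
dataId = {'visit': 1234, 'ccd': 42}           # placeholder data ID
fakes = getFakeSources(butler, dataId, tol=1.0,
                       extraCols=('zeropoint', 'visit', 'ccd'),
                       includeMissing=True)
if fakes is not None:
    print(len(fakes), "matched fake sources")
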
Exemplo n.º 18
0
class MeasureCoaddsBfdSingleTask(ProcessCoaddsTogetherTask):
    """
    Base class for ngmix tasks
    """
    _DefaultName = "MeasureCoaddsBfdSingleTask"
    ConfigClass = MeasureCoaddsBfdSingleConfig

    def __init__(self,
                 *,
                 config=None,
                 refSchema=None,
                 butler=None,
                 initInputs=None,
                 **kwds):

        ProcessCoaddsTogetherTask.__init__(self,
                                           config=config,
                                           refSchema=refSchema,
                                           butler=butler,
                                           initInputs=initInputs,
                                           **kwds)
        if refSchema is None:
            if butler is None:
                if initInputs is not None:
                    refSchema = initInputs.get("refSchema", None)
                if refSchema is None:
                    refSchema = SourceCatalog.Table.makeMinimalSchema()
            else:
                refSchema = butler.get(self.config.ref.name + "_schema").schema
        self.ncolors = 0
        self.bfd = BFDConfig(use_conc=self.config.use_conc,
                             use_mag=self.config.use_mag,
                             ncolors=self.ncolors)
        self.n_even = self.bfd.BFDConfig.MSIZE
        self.n_odd = self.bfd.BFDConfig.XYSIZE
        self.weight = KSigmaWeightF(self.config.weight_sigma,
                                    self.config.weight_n)
        self.schema = self.defineSchema(refSchema)

    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(),
                                     True)
        schema = self.mapper.getOutputSchema()

        self.even = schema.addField('bfd_even',
                                    type="ArrayF",
                                    size=self.n_even,
                                    doc="Even Bfd moments")
        self.odd = schema.addField('bfd_odd',
                                   type="ArrayF",
                                   size=self.n_odd,
                                   doc="odd moments")
        self.shift = schema.addField('bfd_shift',
                                     type="ArrayF",
                                     size=2,
                                     doc="amount shifted to null moments")
        self.cov_even = schema.addField('bfd_cov_even',
                                        type="ArrayF",
                                        size=self.n_even * (self.n_even + 1) //
                                        2,
                                        doc="even moment covariance matrix")
        self.cov_odd = schema.addField('bfd_cov_odd',
                                       type="ArrayF",
                                       size=self.n_odd * (self.n_odd + 1) // 2,
                                       doc="odd moment covariance matrix")
        self.flag = schema.addField('bfd_flag',
                                    type="Flag",
                                    doc="Set to 1 for any fatal failure")
        self.centroid_flag = schema.addField(
            'bfd_flag_centroid',
            type="Flag",
            doc="Set to 1 for any fatal failure of centroid")
        self.parent_flag = schema.addField('bfd_flag_parent',
                                           type="Flag",
                                           doc="Set to 1 for parents")
        return schema

    def runDataRef(self, patchRefList):
        """Run this task via CmdLineTask and Gen2 Butler.
        Parameters
        ----------
        patchRefList : `list` of `lsst.daf.persistence.ButlerDataRef`
            A list of DataRefs for all filters in a single patch.
        """
        #import pdb;pdb.set_trace()
        images = {}
        replacers = {}
        mergedDataId = {
            "tract": patchRefList[0].dataId["tract"],
            "patch": patchRefList[0].dataId["patch"]
        }
        butler = patchRefList[0].butlerSubset.butler
        ref = butler.get("deepCoadd_ref", dataId=mergedDataId)
        imageId = butler.get("deepMergedCoaddId", dataId=mergedDataId)
        for patchRef in patchRefList:
            filt = getShortFilterName(patchRef.dataId["filter"])
            images[filt] = patchRef.get(self.config.images.name)

            fpCat = patchRef.get(self.config.deblendCatalog.name)
            footprints = {
                rec.getId(): (rec.getParent(), rec.getFootprint())
                for rec in fpCat
            }
            replacers[filt] = NoiseReplacer(self.config.deblendReplacer,
                                            exposure=images[filt],
                                            footprints=footprints,
                                            exposureId=imageId)
        results = self.run(images, ref, imageId=imageId, replacers=replacers)
        mergedDataId['filter'] = self.config.filters[0]
        butler.put(results.output,
                   self.config.output.name,
                   dataId=mergedDataId)

    def run(self, images, ref, replacers, imageId):
        """Process coadds from all bands for a single patch.
        This method should not add or modify self.
        So far all children are using this exact code so leaving
        it here for now. If we specialize a lot, might make a
        processor its own object
        Parameters
        ----------
        images : `dict` of `lsst.afw.image.ExposureF`
            Coadd images and associated metadata, keyed by filter name.
        ref : `lsst.afw.table.SourceCatalog`
            A catalog with one record for each object, containing "best"
            measurements across all bands.
        replacers : `dict` of `lsst.meas.base.NoiseReplacer`, optional
            A dictionary of `~lsst.meas.base.NoiseReplacer` objects that can
            be used to insert and remove deblended pixels for each object.
            When not `None`, all detected pixels in ``images`` will have
            *already* been replaced with noise, and this *must* be used
            to restore objects one at a time.
        imageId : `int`
            Unique ID for this unit of data.  Should be used (possibly
            indirectly) to seed random numbers.
        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Struct with (at least) an `output` attribute that is a catalog
            to be written as ``self.config.output``.
        """

        if len(images) != len(self.config.filters):
            self.log.info(
                'Number of filters does not match the list of images given.  Skipping'
            )
            return None

        tm0 = time.time()
        nproc = 0
        #import pdb;pdb.set_trace()
        # Make an empty catalog
        output = SourceCatalog(self.schema)

        # Add mostly-empty rows to it, copying IDs from the ref catalog.
        output.extend(ref, mapper=self.mapper)

        min_index = self.config.start_index
        if self.config.num_to_process is None:
            max_index = len(ref)
        else:
            max_index = self.config.start_index + self.config.num_to_process

        #for n, (refRecord) in enumerate(zip(ref)):
        for n, (refRecord, outRecord) in enumerate(zip(ref, output)):
            if n < min_index or n >= max_index:
                continue

            if refRecord.get('deblend_nChild') != 0:
                outRecord.set(self.flag, 1)
                outRecord.set(self.parent_flag, 1)
                continue

            #outRecord = output.table.copyRecord(refRecord, self.mapper)
            #output._append(outRecord)

            self.log.info('index: %06d/%06d' % (n, max_index))
            nproc += 1

            # Footprints were copied from the ref catalog; we don't need to
            # write these again.
            outRecord.setFootprint(None)

            # Insert the deblended pixels for just this object into all images.
            for r in replacers.values():
                r.insertSource(refRecord.getId())

            try:
                kgals = self.buildKGalaxy(refRecord, images)
                kc = KColorGalaxy(self.bfd, kgals)

            except Exception as e:
                kc = None

            if kc is None:
                outRecord.set(self.flag, 1)
                continue

            dx, badcentering, msg = kc.recenter(self.config.weight_sigma)

            if badcentering:
                self.log.info('Bad centering %s', msg)
                outRecord.set(self.flag, 1)
                outRecord.set(self.centroid_flag, 1)
                dx = [0, 0]

            mom, cov = kc.get_moment(dx[0], dx[1], True)
            mom_even = mom.m
            mom_odd = mom.xy
            cov_even = cov.m
            cov_odd = cov.xy

            cov_even_save = []
            cov_odd_save = []
            for ii in range(cov_even.shape[0]):
                cov_even_save.extend(cov_even[ii][ii:])
            for ii in range(cov_odd.shape[0]):
                cov_odd_save.extend(cov_odd[ii][ii:])

            outRecord.set(self.even, np.array(mom_even, dtype=np.float32))
            outRecord.set(self.odd, np.array(mom_odd, dtype=np.float32))
            outRecord.set(self.cov_even,
                          np.array(cov_even_save, dtype=np.float32))
            outRecord.set(self.cov_odd, np.array(cov_odd_save,
                                                 dtype=np.float32))
            outRecord.set(self.shift, np.array([dx[0], dx[1]],
                                               dtype=np.float32))

            # Remove the deblended pixels for this object so we can process the next one.
            for r in replacers.values():
                r.removeSource(refRecord.getId())
            del kgals
            del kc
            del mom
            del cov
        # Restore all original pixels in the images.
        if replacers is not None:
            for r in replacers.values():
                r.end()

        tm = time.time() - tm0
        self.log.info('time: %g min' % (tm / 60.0))
        if nproc > 0:
            self.log.info('time per: %g sec' % (tm / nproc))

        return Struct(output=output[min_index:max_index])

    def buildKGalaxy(self, record, exposures):

        center = record.getCentroid()
        band = self.config.filters[0]
        local_lin_wcs = exposures[band].getWcs().linearizePixelToSky(
            center, afwGeom.arcseconds)

        jacobian = local_lin_wcs.getLinear().getMatrix()
        sky_pos = exposures[band].getWcs().pixelToSky(center)
        uvref = (sky_pos.getRa().asArcseconds(),
                 sky_pos.getDec().asArcseconds())

        box = record.getFootprint().getBBox()
        xy_pos = (center.getX() - box.getMinX(), center.getY() - box.getMinY())

        bfd_wcs = bfd.WCS(jacobian, xyref=xy_pos, uvref=uvref)

        kgals = []
        for band in self.config.filters:
            exposure = exposures[band]
            factor = exposure.getMetadata().get('variance_scale')
            noise = np.sqrt(np.median(exposure.variance[box].array) / factor)
            image = exposure.image[box].array

            psf_image = exposure.getPsf().computeKernelImage(center).array

            kdata = bfd.generalImage(image,
                                     uvref,
                                     psf_image,
                                     wcs=bfd_wcs,
                                     pixel_noise=noise,
                                     size=self.config.grid_size)
            conjugate = set(np.where(~kdata.conjugate.flatten())[0])
            kgal = self.bfd.KGalaxy(self.weight, kdata.kval.flatten(),
                                    kdata.kx.flatten(), kdata.ky.flatten(),
                                    kdata.kvar.flatten(), kdata.d2k, conjugate)
            kgals.append(kgal)
        return kgals

    def selection(self, ref):
        childName = 'deblend_nChild'
        if ref.getParent() == 0 and ref.get(childName) > 0:
            return False
        return True
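The run method above stores only the upper triangle of each symmetric covariance matrix, packed row by row; MeasureCoaddsPqrTask.prep below reverses that packing. A self-contained numpy sketch of the round trip, using the same loops:

import numpy as np

n = 4
rng = np.random.RandomState(42)
a = rng.normal(size=(n, n))
cov = a @ a.T  # symmetric test matrix

# Pack the upper triangle row by row, as in run() above.
packed = []
for ii in range(n):
    packed.extend(cov[ii][ii:])
packed = np.array(packed)  # length n*(n+1)//2

# Reconstruct the full symmetric matrix, as in prep() below.
full = np.zeros((n, n))
start = 0
for i in range(n):
    full[i][i:] = packed[start:start + n - i]
    start += n - i
for i in range(n):
    for j in range(i):
        full[i, j] = full[j, i]

assert np.allclose(full, cov)
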
Exemplo n.º 19
0
def _loadAndMatchCatalogs(repo,
                          dataIds,
                          matchRadius,
                          doApplyExternalPhotoCalib=False,
                          externalPhotoCalibName=None,
                          doApplyExternalSkyWcs=False,
                          externalSkyWcsName=None,
                          skipTEx=False,
                          skipNonSrd=False):
    """Load data from specific visits and returned a calibrated catalog matched
    with a reference.

    Parameters
    ----------
    repo : `str` or `lsst.daf.persistence.Butler`
        A Butler or a repository URL that can be used to construct one.
    dataIds : list of dict
        List of butler data IDs of image catalogs to compare to the
        reference.  The calexp pixel image is needed for the photometric
        calibration.
    matchRadius :  `lsst.geom.Angle`, optional
        Radius for matching. Default is 1 arcsecond.
    doApplyExternalPhotoCalib : bool, optional
        Apply external photoCalib to calibrate fluxes.
    externalPhotoCalibName : str, optional
        Type of external `PhotoCalib` to apply.  Currently supported are jointcal,
        fgcm, and fgcm_tract.  Must be set if doApplyExternalPhotoCalib is True.
    doApplyExternalSkyWcs : bool, optional
        Apply external wcs to calibrate positions.
    externalSkyWcsName : str, optional
        Type of external `wcs` to apply.  Currently supported is jointcal.
        Must be set if doApplyExternalSkyWcs is True.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    skipNonSrd : `bool`, optional
        Skip any metrics not defined in the LSST SRD; default False.

    Returns
    -------
    catalog : `lsst.afw.table.SourceCatalog`
        A new calibrated SourceCatalog.
    matches : `lsst.afw.table.GroupView`
        A GroupView of the matched sources.

    Raises
    ------
    RuntimeError
        Raised if "doApplyExternalPhotoCalib" is True and "externalPhotoCalibName"
        is None, or if "doApplyExternalSkyWcs" is True and "externalSkyWcsName" is
        None.
    """

    if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
        raise RuntimeError(
            "Must set externalPhotoCalibName if doApplyExternalPhotoCalib is True."
        )
    if doApplyExternalSkyWcs and externalSkyWcsName is None:
        raise RuntimeError(
            "Must set externalSkyWcsName if doApplyExternalSkyWcs is True.")

    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    if isinstance(repo, dafPersist.Butler):
        butler = repo
    else:
        butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in dataIds]

    ccdKeyName = getCcdKeyName(dataIds[0])

    # Hack to support raft and sensor 0,1 IDs as ints for multimatch
    if ccdKeyName == 'sensor':
        ccdKeyName = 'raft_sensor_int'
        for vId in dataIds:
            vId[ccdKeyName] = raftSensorToInt(vId)

    schema = butler.get(dataset + "_schema").schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    if not skipNonSrd:
        # Needed because addOutputField(... 'slot_ModelFlux_mag') will add a field with that literal name
        aliasMap = schema.getAliasMap()
        # Possibly not needed since base_GaussianFlux is the default, but this ought to be safe
        modelName = (aliasMap['slot_ModelFlux']
                     if 'slot_ModelFlux' in aliasMap.keys()
                     else 'base_GaussianFlux')
        mapper.addOutputField(Field[float](f'{modelName}_mag',
                                           'Model magnitude'))
        mapper.addOutputField(Field[float](f'{modelName}_magErr',
                                           'Model magnitude uncertainty'))
        mapper.addOutputField(Field[float](f'{modelName}_snr',
                                           'Model flux snr'))
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={
                            'visit': np.int32,
                            ccdKeyName: np.int32
                        },
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in dataIds:
        if not butler.datasetExists('src', vId):
            print(f'Could not find source catalog for {vId}; skipping.')
            continue

        photoCalib = _loadPhotoCalib(butler, vId, doApplyExternalPhotoCalib,
                                     externalPhotoCalibName)
        if photoCalib is None:
            continue

        if doApplyExternalSkyWcs:
            wcs = _loadExternalSkyWcs(butler, vId, externalSkyWcsName)
            if wcs is None:
                continue

        # We don't want to put this above the first _loadPhotoCalib call
        # because we need to use the first `butler.get` in there to quickly
        # catch dataIDs with no usable outputs.
        try:
            # HSC supports these flags, which dramatically improve I/O
            # performance; support for other cameras is DM-6927.
            oldSrc = butler.get('src', vId, flags=SOURCE_IO_NO_FOOTPRINTS)
        except (OperationalError, sqlite3.OperationalError):
            oldSrc = butler.get('src', vId)

        print(len(oldSrc),
              "sources in ccd %s  visit %s" % (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if doApplyExternalSkyWcs:
            afwTable.updateSourceCoords(wcs, tmpCat)
        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")
        if not skipNonSrd:
            tmpCat['slot_ModelFlux_snr'][:] = (
                tmpCat['slot_ModelFlux_instFlux'] /
                tmpCat['slot_ModelFlux_instFluxErr'])
            photoCalib.instFluxToMagnitude(tmpCat, "slot_ModelFlux",
                                           "slot_ModelFlux")

        if not skipTEx:
            _, psf_e1, psf_e2 = ellipticity_from_cat(
                oldSrc, slot_shape='slot_PsfShape')
            _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                       slot_shape='slot_Shape')
            tmpCat['e1'][:] = star_e1
            tmpCat['e2'][:] = star_e2
            tmpCat['psf_e1'][:] = psf_e1
            tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return srcVis, allMatches
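A sketch of calling _loadAndMatchCatalogs with external calibrations applied; the repository path, data IDs, and calibration names below are placeholders:

import lsst.geom

repo = '/path/to/rerun'  # placeholder
dataIds = [{'visit': 1234, 'ccd': 42}, {'visit': 1235, 'ccd': 42}]  # placeholders
matchRadius = 1.0*lsst.geom.arcseconds

srcVis, allMatches = _loadAndMatchCatalogs(
    repo, dataIds, matchRadius,
    doApplyExternalPhotoCalib=True, externalPhotoCalibName='jointcal',
    doApplyExternalSkyWcs=True, externalSkyWcsName='jointcal')
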
Exemplo n.º 20
0
def match_catalogs(inputs,
                   photoCalibs,
                   astromCalibs,
                   vIds,
                   matchRadius,
                   apply_external_wcs=False,
                   logger=None):
    schema = inputs[0].schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', 'PSF flux SNR'))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', 'PSF magnitude'))
    mapper.addOutputField(Field[float]('base_PsfFlux_magErr',
                                       'PSF magnitude uncertainty'))
    # Needed because addOutputField(... 'slot_ModelFlux_mag') will add a field with that literal name
    aliasMap = schema.getAliasMap()
    # Possibly not needed since base_GaussianFlux is the default, but this ought to be safe
    modelName = (aliasMap['slot_ModelFlux']
                 if 'slot_ModelFlux' in aliasMap.keys()
                 else 'base_GaussianFlux')
    mapper.addOutputField(Field[float](f'{modelName}_mag', 'Model magnitude'))
    mapper.addOutputField(Field[float](f'{modelName}_magErr',
                                       'Model magnitude uncertainty'))
    mapper.addOutputField(Field[float](f'{modelName}_snr', 'Model flux snr'))
    mapper.addOutputField(Field[float]('e1', 'Source Ellipticity 1'))
    mapper.addOutputField(Field[float]('e2', 'Source Ellipticity 2'))
    mapper.addOutputField(Field[float]('psf_e1', 'PSF Ellipticity 1'))
    mapper.addOutputField(Field[float]('psf_e2', 'PSF Ellipticity 2'))
    mapper.addOutputField(Field[np.int32]('filt', 'filter code'))
    newSchema = mapper.getOutputSchema()
    newSchema.setAliasMap(schema.getAliasMap())

    # Create an object that matches multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={
                            'visit': np.int32,
                            'detector': np.int32
                        },
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    filter_dict = {
        'u': 1,
        'g': 2,
        'r': 3,
        'i': 4,
        'z': 5,
        'y': 6,
        'HSC-U': 1,
        'HSC-G': 2,
        'HSC-R': 3,
        'HSC-I': 4,
        'HSC-Z': 5,
        'HSC-Y': 6
    }

    # Sort by visit, detector, then filter
    vislist = [v['visit'] for v in vIds]
    ccdlist = [v['detector'] for v in vIds]
    filtlist = [v['band'] for v in vIds]
    tab_vids = Table([vislist, ccdlist, filtlist],
                     names=['vis', 'ccd', 'filt'])
    sortinds = np.argsort(tab_vids, order=('vis', 'ccd', 'filt'))

    for ind in sortinds:
        oldSrc = inputs[ind]
        photoCalib = photoCalibs[ind]
        wcs = astromCalibs[ind]
        vId = vIds[ind]

        if logger:
            logger.debug(
                f"{len(oldSrc)} sources in ccd {vId['detector']}  visit {vId['visit']}"
            )

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)

        filtnum = filter_dict[vId['band']]
        tmpCat['filt'] = np.repeat(filtnum, len(oldSrc))

        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_instFlux'] \
            / tmpCat['base_PsfFlux_instFluxErr']

        if apply_external_wcs and wcs is not None:
            updateSourceCoords(wcs, tmpCat)

        photoCalib.instFluxToMagnitude(tmpCat, "base_PsfFlux", "base_PsfFlux")
        tmpCat['slot_ModelFlux_snr'][:] = (
            tmpCat['slot_ModelFlux_instFlux'] /
            tmpCat['slot_ModelFlux_instFluxErr'])
        photoCalib.instFluxToMagnitude(tmpCat, "slot_ModelFlux",
                                       "slot_ModelFlux")

        _, psf_e1, psf_e2 = ellipticity_from_cat(oldSrc,
                                                 slot_shape='slot_PsfShape')
        _, star_e1, star_e2 = ellipticity_from_cat(oldSrc,
                                                   slot_shape='slot_Shape')
        tmpCat['e1'][:] = star_e1
        tmpCat['e2'][:] = star_e2
        tmpCat['psf_e1'][:] = psf_e1
        tmpCat['psf_e2'][:] = psf_e2

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.

    # I don't think I can persist a group view, so this may need to be called in a subsequent task
    # allMatches = GroupView.build(matchCat)

    return srcVis, matchCat
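Because the GroupView itself is not persisted here, a subsequent task can rebuild the grouping from the returned match catalog. A minimal sketch, assuming the catalog round-trips through FITS (the path is a placeholder):

from lsst.afw.table import SimpleCatalog, GroupView

matchCat.writeFits('/tmp/matchCat.fits')  # placeholder path

# ... later, in a subsequent task:
restored = SimpleCatalog.readFits('/tmp/matchCat.fits')
allMatches = GroupView.build(restored)
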
Exemplo n.º 21
0
class MeasureCoaddsPqrTask(ProcessCoaddsTogetherTask):
    """
    Base class for ngmix tasks
    """
    _DefaultName = "MeasureCoaddsPqrTask"
    ConfigClass = MeasureCoaddsPqrConfig
    RunnerClass = MyTaskRunner

    def __init__(self, *, config=None, refSchema=None, butler=None, initInputs=None, **kwds):
        #import pdb;pdb.set_trace()
        ProcessCoaddsTogetherTask.__init__(self, config=config, refSchema=refSchema, butler=butler,
                                           initInputs=initInputs, **kwds)
        if refSchema is None:
            if butler is None:
                if initInputs is not None:
                    refSchema = initInputs.get("refSchema", None)
                if refSchema is None:
                    refSchema = SourceCatalog.Table.makeMinimalSchema()
            else:
                refSchema = butler.get(self.config.ref.name + "_schema").schema
        self.ncolors = len(self.config.filters) - 1
        self.bfd = BFDConfig(use_conc=self.config.use_conc, use_mag=self.config.use_mag,
                             ncolors=self.ncolors)
        self.n_even = self.bfd.BFDConfig.MSIZE
        self.n_odd = self.bfd.BFDConfig.XYSIZE
        self.weight = KSigmaWeightF(self.config.weight_sigma, self.config.weight_n)
        self.schema = self.defineSchema(refSchema)
        self.ud = UniformDeviate()
        print('Reinitialized')
        self.initialized = False

    @classmethod
    def _makeArgumentParser(cls):
        # Customize argument parsing for CmdLineTask.
        parser = ArgumentParser(name=cls._DefaultName)
        # This should be config.images.name, but there's no way to pass that
        # information in here in Gen2.
        parser.add_argument("--dir", dest="dir", default='./',
                            help="location of files")
        return parser

    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
        schema = self.mapper.getOutputSchema()
        self.pqrKey = schema.addField("pqr", doc="pqr", type="ArrayF",
                                      size=self.bfd.BFDConfig.DSIZE)
        self.momKey = schema.addField("moment", doc="moment", type="ArrayF",
                                      size=self.n_even)
        self.momCovKey = schema.addField("moment_cov", doc="moment", type="ArrayF",
                                         size=self.n_even*(self.n_even+1)//2)
        self.numKey = schema.addField("n_templates", doc="number", type=np.int64)
        self.uniqKey = schema.addField("n_unique", doc="unique", type=np.int32)
        self.zKey = schema.addField("z", doc="redshift", type=np.float)
        self.g1Key = schema.addField("g1", doc="redshift", type=np.float)
        self.g2Key = schema.addField("g2", doc="redshift", type=np.float)
        self.kappaKey = schema.addField("kappa", doc="redshift", type=np.float)
        self.magKey = schema.addField("mag", doc="redshift", type=np.float)
        self.labelKey = schema.addField("label", doc="redshift", type=str, size=10)
            # self.zIdKey = schema.addField("z_id", doc="redshift", type=np.int64)

        return schema

    def runDataRef(self, files):
        """Run this task via CmdLineTask and Gen2 Butler.
        Parameters
        ----------
        patchRefList : `list` of `lsst.daf.persistence.ButlerDataRef`
            A list of DataRefs for all filters in a single patch.
        """
        self.prep()

        for file in files:
            cat = afwTable.BaseCatalog.readFits(file)

            mask = ((cat['moments_r_even'][:, 0] >= self.fluxMin) &
                    (cat['moments_r_even'][:, 0] < self.fluxMax) &
                    (cat['moments_r_cov_even'][:, 0] >= self.varMin) &
                    (cat['moments_r_cov_even'][:, 0] < self.varMax) &
                    (cat['z'] >= self.zMin) &
                    (cat['z'] < self.zMax))
            tgs = []
            for rec in cat[mask]:
                cov_even = rec.get('moments_r_cov_even')
                cov_odd = rec.get('moments_r_cov_odd')
                full_cov_even = np.zeros((self.n_even, self.n_even), dtype=np.float32)
                full_cov_odd = np.zeros((self.n_odd, self.n_odd), dtype=np.float32)

                start = 0
                for i in range(self.n_even):
                    full_cov_even[i][i:] = cov_even[start:start + self.n_even - i]
                    start += self.n_even - i

                for i in range(self.n_even):
                    for j in range(i):
                        full_cov_even[i, j] = full_cov_even[j, i]

                start = 0
                for i in range(self.n_odd):
                    full_cov_odd[i][i:] = cov_odd[start:start + self.n_odd - i]
                    start += self.n_odd - i

                for i in range(self.n_odd):
                    for j in range(i):
                        full_cov_odd[i, j] = full_cov_odd[j, i]

                cov = self.bfd.MomentCov(full_cov_even, full_cov_odd)
                #mom_odd = np.array([0, 0])
                moment = self.bfd.Moment(rec.get('moments_r_even'), rec.get('moments_r_odd'))
                pos = np.array([rec.get('ra'), rec.get('dec')])
                tg = self.bfd.TargetGalaxy(moment, cov, pos, rec.get('id'))
                tgs.append(tg)

            results = self.prior.getPqrCatalog(tgs[:10], self.config.threads, self.config.chunk)

            outcat = afwTable.BaseCatalog(self.schema)
            raKey = self.schema['coord_ra'].asKey()
            decKey = self.schema['coord_dec'].asKey()
            idKey = self.schema['id'].asKey()

            for r, rec in zip(results, cat[mask][:10]):
                out = outcat.addNew()
                out.set(self.pqrKey, r[0]._pqr)
                out.set(self.numKey, r[1])
                out.set(self.uniqKey, r[2])
                out.set(self.momKey, rec.get('moments_r_even'))
                out.set(self.momCovKey, rec.get('moments_r_cov_even'))
                out.set(self.zKey, rec.get('z'))
                out.set(self.g1Key, rec.get('g1'))
                out.set(self.g2Key, rec.get('g2'))
                out.set(self.kappaKey, rec.get('kappa'))
                out.set(self.magKey, rec.get('mag'))
                out.set(self.labelKey, self.config.label)
                out.set(raKey, rec.get('ra')*afwGeom.degrees)
                out.set(decKey, rec.get('dec')*afwGeom.degrees)
                out.set(idKey, rec.get('id'))

            outfile = file.replace('moment', 'pqr')
            outcat.writeFits(outfile)

    def prep(self):

        if self.initialized:
            return

        self.prior = None
        priorFiles = []
        priorButler = Butler(self.config.priorRerun)
        prior_skyMap = priorButler.get('deepCoadd_skyMap')

        for tract in self.config.priorTracts:
            for patchInfo in prior_skyMap[tract]:
                patch = '%d,%d' % patchInfo.getIndex()

                if self.config.priorPatches:
                    if patch not in self.config.priorPatches:
                        continue

                if priorButler.datasetExists('deepCoadd_prior', tract=tract, patch=patch,
                                             filter=self.config.priorFilter, label=self.config.priorLabel):
                    priorFiles.append(priorButler.getUri('deepCoadd_prior',
                                                         tract=tract, patch=patch,
                                                         filter=self.config.priorFilter,
                                                         label=self.config.priorLabel))

        max_file = len(priorFiles)
        if self.config.maxPriorFiles > 0:
            max_file = self.config.maxPriorFiles

        self.zBin = None
        for file in priorFiles[:max_file]:
            if file.find('_parent') > 0:
                self.log.info("Skipping %s, from parent" % file)
                continue
            self.log.info("Adding prior %s" % file)
            try:
                cat = afwTable.BaseCatalog.readFits(file)
                md = cat.getTable().getMetadata().toDict()

                if self.prior is None:
                    self.fluxMin = md['FLUXMIN']
                    self.fluxMax = md['FLUXMAX']
                    self.varMin = md['VARMIN']
                    self.varMax = md['VARMAX']
                    cov_even = np.array(md['COV_EVEN'])
                    cov_odd = np.array(md['COV_ODD'])
                    self.zMax = md['ZMAXCUT']
                    self.zMin = md['ZMINCUT']
                    self.noiseFactor = md['noiseFactor']
                    self.priorSigmaCutoff = md['priorSigmaCutoff']
                    self.priorSigmaStep = md['priorSigmaStep']
                    self.priorSigmaBuffer = md['priorSigmaBuffer']
                    self.nSample = md['NSAMPLE']
                    self.selectionOnly = md['selectionOnly']
                    self.invariantCovariance = md['invariantCovariance']

                    covMat = self.bfd.MomentCov(cov_even.reshape(self.n_even, self.n_even),
                                                cov_odd.reshape(self.n_odd, self.n_odd))
                    self.prior = self.bfd.KDTreePrior(self.fluxMin, self.fluxMax, covMat, self.ud,
                                                      self.nSample, self.selectionOnly,
                                                      self.noiseFactor, self.priorSigmaStep,
                                                      self.priorSigmaCutoff, self.priorSigmaBuffer,
                                                      self.invariantCovariance)
                else:
                    fluxMin = md['FLUXMIN']
                    fluxMax = md['FLUXMAX']
                    varMin = md['VARMIN']
                    varMax = md['VARMAX']
                    cov_even = np.array(md['COV_EVEN'])
                    cov_odd = np.array(md['COV_ODD'])
                    zMax = md['ZMAXCUT']
                    zMin = md['ZMINCUT']
                    noiseFactor = md['noiseFactor']
                    priorSigmaCutoff = md['priorSigmaCutoff']
                    priorSigmaStep = md['priorSigmaStep']
                    priorSigmaBuffer = md['priorSigmaBuffer']
                    nSample = md['NSAMPLE']
                    selectionOnly = md['selectionOnly']
                    invariantCovariance = md['invariantCovariance']

                    mismatch = False
                    if fluxMin != self.fluxMin:
                        self.log.info('does not match fluxMin')
                        mismatch = True
                    if fluxMax != self.fluxMax:
                        self.log.info('does not match fluxMax')
                        mismatch = True
                    if varMin != self.varMin:
                        self.log.info('does not match varMin')
                        mismatch = True
                    if varMax != self.varMax:
                        self.log.info('does not match varMax')
                        mismatch = True
                    if zMin != self.zMin:
                        self.log.info('does not match zMin')
                        mismatch = True
                    if zMax != self.zMax:
                        self.log.info('does not match zMax')
                        mismatch = True
                    if noiseFactor != self.noiseFactor:
                        self.log.info('does not match noiseFactor')
                        mismatch = True
                    if priorSigmaBuffer != self.priorSigmaBuffer:
                        self.log.info('does not match priorSigmaBuffer')
                        mismatch = True
                    if priorSigmaStep != self.priorSigmaStep:
                        self.log.info('does not match priorSigmaStep')
                        mismatch = True
                    if priorSigmaCutoff != self.priorSigmaCutoff:
                        self.log.info('does not match priorSigmaCutoff')
                        mismatch = True
                    if nSample != self.nSample:
                        self.log.info('does not match nSample')
                        mismatch = True
                    if selectionOnly != self.selectionOnly:
                        self.log.info('does not match selectionOnly')
                        mismatch = True
                    if invariantCovariance != self.invariantCovariance:
                        self.log.info('does not match invariantCovariance')
                        mismatch = True

                    if mismatch:
                        self.log.info('Skipping %s' % file)
                        continue

                for s in cat:
                    ti = self.bfd.TemplateInfo()
                    ti.m = s.get('m')
                    ti.dm = s.get('dm').reshape(self.bfd.BFDConfig.MSIZE, self.bfd.BFDConfig.DSIZE)
                    ti.dxy = s.get('dxy').reshape(self.bfd.BFDConfig.XYSIZE, self.bfd.BFDConfig.DSIZE)
                    ti.nda = s.get('nda')
                    ti.id = s.get('bfd_id')

                    self.prior.addTemplateInfo(ti)

            except Exception as e:
                self.log.info('Failed to read %s: %s' % (file, e))
                continue
        self.prior.prepare()
        self.initialized = True

    def defineSchema(self, refSchema):
        """Return the Schema for the output catalog.

        This method may also add attributes to or modify ``self``.

        Parameters
        ----------
        refSchema : `lsst.afw.table.Schema`
            Schema of the input reference catalogs.

        Returns
        -------
        outputSchema : `lsst.afw.table.Schema`
            Schema of the output catalog.  Will be added as ``self.schema``
            by calling code.
        """
        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(), True)
        schema = self.mapper.getOutputSchema()

        config=self.cdict

        model=config['obj']['model']
        n=self.get_namer()
        pn=self.get_psf_namer()

        # generic ngmix fields
        mtypes=[
            (n('flags'),'overall flags for the processing',np.int32,''),
            (n('stamp_size'),'size of postage stamp',np.int32,''),
            (n('maskfrac'),'mean masked fraction',np.float32,''),
        ]
        for filt in config['filters']:
            mtypes += [
                (n('maskfrac_%s' % filt),'masked fraction in %s filter' % filt,np.float32,''),
            ]


        # psf fitting related fields
        mtypes += [
            (pn('flags'),'overall flags for the PSF processing',np.int32,''),

            # mean over filters
            (pn('g2_mean'),'mean over filters of component 2 of the PSF ellipticity',np.float64,''),
            (pn('g1_mean'),'mean over filters of component 1 of the PSF ellipticity',np.float64,''),
            (pn('T_mean'),'mean over filters of <x^2> + <y^2> for the gaussian mixture',np.float64,'arcsec^2'),
        ]

        # PSF measurements by filter
        for filt in config['filters']:
            pfn=self.get_psf_namer(filt=filt)
            mtypes += [
                (pfn('flags'), 'overall flags for PSF processing in %s filter' % filt, np.int32, ''),
                (pfn('row'),'offset from canonical row position',np.float64,'arcsec'),
                (pfn('col'),'offset from canonical col position',np.float64,'arcsec'),
                (pfn('g1'), 'component 1 of the PSF ellipticity in %s filter' % filt, np.float64, ''),
                (pfn('g2'), 'component 2 of the PSF ellipticity in %s filter' % filt, np.float64, ''),
                (pfn('T'), '<x^2> + <y^2> for the PSF in %s filter' % filt, np.float64, 'arcsec^2'),
            ]

        # PSF flux measurements, on the object, by filter
        #for filt in config['filters']:
        #    pfn=self.get_psf_flux_namer(filt)
        #    mtypes += [
        #        (pfn('flux_flags'),'flags for PSF template flux fitting in the %s filter' % filt,np.float64,''),
        #        (pfn('flux'),'PSF template flux in the %s filter' % filt,np.float64,''),
        #        (pfn('flux_err'),'error on PSF template flux in the %s filter' % filt,np.float64,''),
        #    ]

        # object fitting related fields
        for mtype in config['metacal']['types']:
            mn = self.get_model_namer(type=mtype)
            mtypes += [
                (mn('flags'),'flags for model fit',np.int32,''),
                (mn('nfev'),'number of function evaluations during fit',np.int32,''),
                (mn('chi2per'),'chi^2 per degree of freedom',np.float64,''),
                (mn('dof'),'number of degrees of freedom',np.int32,''),

                (mn('s2n'),'S/N for the fit',np.float64,''),

                (mn('row'),'offset from canonical row position',np.float64,'arcsec'),
                (mn('row_err'),'error on offset from canonical row position',np.float64,'arcsec'),
                (mn('col'),'offset from canonical col position',np.float64,'arcsec'),
                (mn('col_err'),'error on offset from canonical col position',np.float64,'arcsec'),
                (mn('g1'),'component 1 of the ellipticity',np.float64,''),
                (mn('g1_err'),'error on component 1 of the ellipticity',np.float64,''),
                (mn('g2'),'component 2 of the ellipticity',np.float64,''),
                (mn('g2_err'),'error on component 2 of the ellipticity',np.float64,''),
                (mn('T'),'<x^2> + <y^2> for the gaussian mixture',np.float64,'arcsec^2'),
                (mn('T_err'),'error on <x^2> + <y^2> for the gaussian mixture',np.float64,'arcsec^2'),
            ]
            if model in ['bd','bdf']:
                mtypes += [
                    (mn('fracdev'),'fraction of light in the bulge',np.float64,''),
                    (mn('fracdev_err'),'error on fraction of light in the bulge',np.float64,''),
                ]

            for filt in config['filters']:
                mfn = self.get_model_flux_namer(filt, type=mtype)
                mtypes += [
                    (mfn('flux'),'flux in the %s filter' % filt,np.float64,''),
                    (mfn('flux_err'),'error on flux in the %s filter' % filt,np.float64,''),
                ]

        for name,doc,dtype,units in mtypes:
            schema.addField(
                name,
                type=dtype,
                doc=doc,
                units=units,
            )

        return schema
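
A minimal sketch of how a schema built this way might be consumed downstream; the task instance is hypothetical, and the introspection calls follow the lsst.afw.table Schema API:

    from lsst.afw.table import SourceCatalog

    # 'task' is a hypothetical, fully configured instance of the class above
    schema = task.defineSchema(refSchema)
    catalog = SourceCatalog(schema)

    # list the fields that were added, with their docs and units
    for name in schema.getOrderedNames():
        item = schema.find(name)
        print(name, item.field.getDoc(), item.field.getUnits())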
Exemplo n.º 23
0
def loadAndMatchData(repo, visitDataIds,
                     matchRadius=afwGeom.Angle(1, afwGeom.arcseconds),
                     verbose=False):
    """Load data from specific visit.  Match with reference.

    Parameters
    ----------
    repo : string
        The repository.  This is generally the directory on disk
        that contains the repository and mapper.
    visitDataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration.
    matchRadius : afwGeom.Angle, optional
        Radius for matching; defaults to 1 arcsecond.
    verbose : bool, optional
        Output additional information on the analysis steps.

    Returns
    -------
    allMatches : afw.table.GroupView
        GroupView of the matched sources, grouped by object ID.
    """

    # Following
    # https://github.com/lsst/afw/blob/tickets/DM-3896/examples/repeatability.ipynb
    butler = dafPersist.Butler(repo)
    dataset = 'src'

    # 2016-02-08 MWV:
    # I feel like I could be doing something more efficient with
    # something along the lines of the following:
    #    dataRefs = [dafPersist.ButlerDataRef(butler, vId) for vId in visitDataIds]

    ccdKeyName = getCcdKeyName(visitDataIds[0])

    schema = butler.get(dataset + "_schema", immediate=True).schema
    mapper = SchemaMapper(schema)
    mapper.addMinimalSchema(schema)
    mapper.addOutputField(Field[float]('base_PsfFlux_snr', "PSF flux SNR"))
    mapper.addOutputField(Field[float]('base_PsfFlux_mag', "PSF magnitude"))
    mapper.addOutputField(Field[float]('base_PsfFlux_magerr', "PSF magnitude uncertainty"))
    newSchema = mapper.getOutputSchema()

    # Create an object that can match multiple catalogs with the same schema
    mmatch = MultiMatch(newSchema,
                        dataIdFormat={'visit': int, ccdKeyName: int},
                        radius=matchRadius,
                        RecordClass=SimpleRecord)

    # create the new extended source catalog
    srcVis = SourceCatalog(newSchema)

    for vId in visitDataIds:
        try:
            calexpMetadata = butler.get("calexp_md", vId, immediate=True)
        except FitsError as fe:
            print(fe)
            print("Could not open calibrated image file for ", vId)
            print("Skipping %s " % repr(vId))
            continue
        except TypeError as te:
            # DECam images that haven't been properly reformatted
            # can trigger a TypeError because of a residual FITS header
            # LTV2 which is a float instead of the expected integer.
            # This generates an error of the form:
            #
            # lsst::pex::exceptions::TypeError: 'LTV2 has mismatched type'
            #
            # See, e.g., DM-2957 for details.
            print(te)
            print("Calibration image header information malformed.")
            print("Skipping %s " % repr(vId))
            continue

        calib = afwImage.Calib(calexpMetadata)

        oldSrc = butler.get('src', vId, immediate=True)
        print(len(oldSrc), "sources in ccd %s  visit %s" % (vId[ccdKeyName], vId["visit"]))

        # create temporary catalog
        tmpCat = SourceCatalog(SourceCatalog(newSchema).table)
        tmpCat.extend(oldSrc, mapper=mapper)
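        # PSF S/N is simply the flux over its 1-sigma uncertainty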
        tmpCat['base_PsfFlux_snr'][:] = tmpCat['base_PsfFlux_flux'] / tmpCat['base_PsfFlux_fluxSigma']
        with afwImageUtils.CalibNoThrow():
            (tmpCat['base_PsfFlux_mag'][:], tmpCat['base_PsfFlux_magerr'][:]) = \
             calib.getMagnitude(tmpCat['base_PsfFlux_flux'],
                                tmpCat['base_PsfFlux_fluxSigma'])

        srcVis.extend(tmpCat, False)
        mmatch.add(catalog=tmpCat, dataId=vId)

    # Complete the match, returning a catalog that includes
    # all matched sources with object IDs that can be used to group them.
    matchCat = mmatch.finish()

    # Create a mapping object that allows the matches to be manipulated
    # as a mapping of object ID to catalog of sources.
    allMatches = GroupView.build(matchCat)

    return allMatches
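
A minimal usage sketch for the function above; the repository path and data IDs are hypothetical, and `aggregate` is the standard GroupView reduction:

    import numpy as np

    visitDataIds = [{'visit': 1000, 'ccd': 10}, {'visit': 1002, 'ccd': 10}]
    allMatches = loadAndMatchData('/path/to/repo', visitDataIds)

    # reduce each matched group of sources to its mean PSF magnitude
    meanPsfMag = allMatches.aggregate(np.mean, field='base_PsfFlux_mag')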
Exemplo n.º 24
0
class MeasureCoaddsPriorTask(ProcessCoaddsTogetherTask):
    """
    Base class for ngmix tasks
    """
    _DefaultName = "MeasureCoaddsPriorTask"
    ConfigClass = MeasureCoaddsPriorConfig

    def __init__(self,
                 *,
                 config=None,
                 refSchema=None,
                 butler=None,
                 initInputs=None,
                 **kwds):
        ProcessCoaddsTogetherTask.__init__(self,
                                           config=config,
                                           refSchema=refSchema,
                                           butler=butler,
                                           initInputs=initInputs,
                                           **kwds)
        if refSchema is None:
            if butler is None:
                if initInputs is not None:
                    refSchema = initInputs.get("refSchema", None)
                if refSchema is None:
                    refSchema = SourceCatalog.Table.makeMinimalSchema()
            else:
                refSchema = butler.get(self.config.ref.name + "_schema").schema
        self.ncolors = len(self.config.filters) - 1
        self.bfd = BFDConfig(use_conc=self.config.use_conc,
                             use_mag=self.config.use_mag,
                             ncolors=self.ncolors)
        self.n_even = self.bfd.BFDConfig.MSIZE
        self.n_odd = self.bfd.BFDConfig.XYSIZE
        self.weight = KSigmaWeightF(self.config.weight_sigma,
                                    self.config.weight_n)
        self.schema = self.defineSchema(refSchema)
        self.ud = UniformDeviate()

    def getCovariances(self):
        cat = afwTable.BaseCatalog.readFits(self.config.covFile)

        covList = []

        for rec in cat:
            cov_even = rec.get('isoCovEven')
            cov_odd = rec.get('isoCovOdd')
            label = rec.get('label')
            minVariance = rec.get('min')
            maxVariance = rec.get('max')
            full_cov_even = np.zeros((self.n_even, self.n_even),
                                     dtype=np.float32)
            full_cov_odd = np.zeros((self.n_odd, self.n_odd), dtype=np.float32)

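            # The catalog stores only the upper triangle of each symmetric
            # covariance matrix, packed row by row; unpack it below and then
            # mirror the upper triangle into the lower one.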
            start = 0
            for i in range(self.n_even):
                full_cov_even[i][i:] = cov_even[start:start + self.n_even - i]
                start += self.n_even - i

            for i in range(self.n_even):
                for j in range(i):
                    full_cov_even[i, j] = full_cov_even[j, i]

            start = 0
            for i in range(self.n_odd):
                full_cov_odd[i][i:] = cov_odd[start:start + self.n_odd - i]
                start += self.n_odd - i

            for i in range(self.n_odd):
                for j in range(i):
                    full_cov_odd[i, j] = full_cov_odd[j, i]

            covList.append(
                (full_cov_even, full_cov_odd, label, minVariance, maxVariance))

        return covList

    def defineSchema(self, refSchema):

        self.mapper = SchemaMapper(refSchema)
        self.mapper.addMinimalSchema(SourceCatalog.Table.makeMinimalSchema(),
                                     True)
        schema = self.mapper.getOutputSchema()
        self.mKey = schema.addField("m",
                                    doc="template m",
                                    type="ArrayF",
                                    size=self.bfd.BFDConfig.MXYSIZE)
        self.dmKey = schema.addField("dm",
                                     doc="template m",
                                     type="ArrayF",
                                     size=self.bfd.BFDConfig.MSIZE *
                                     self.bfd.BFDConfig.DSIZE)
        self.dxyKey = schema.addField("dxy",
                                      doc="template m",
                                      type="ArrayF",
                                      size=self.bfd.BFDConfig.XYSIZE *
                                      self.bfd.BFDConfig.DSIZE)
        self.ndaKey = schema.addField("nda", doc="nda", type=np.float)
        self.idKey = schema.addField("bfd_id", doc="id", type=np.int64)
        if self.config.zFile:
            self.zKey = schema.addField("z", doc="redshift", type=np.float)
            # self.zIdKey = schema.addField("z_id", doc="redshift", type=np.int64)

        return schema

    def runDataRef(self, patchRefList):
        """Run this task via CmdLineTask and Gen2 Butler.
        Parameters
        ----------
        patchRefList : `list` of `lsst.daf.persistence.ButlerDataRef`
            A list of DataRefs for all filters in a single patch.
        """
        images = {}
        replacers = {}
        mergedDataId = {
            "tract": patchRefList[0].dataId["tract"],
            "patch": patchRefList[0].dataId["patch"]
        }
        butler = patchRefList[0].butlerSubset.butler
        ref = butler.get("deepCoadd_ref", dataId=mergedDataId)
        imageId = butler.get("deepMergedCoaddId", dataId=mergedDataId)
        moments = butler.get('deepCoadd_moments', dataId=mergedDataId)

        for patchRef in patchRefList:
            filt = getShortFilterName(patchRef.dataId["filter"])
            images[filt] = patchRef.get(self.config.images.name)

            fpCat = patchRef.get(self.config.deblendCatalog.name)
            footprints = {
                rec.getId(): (rec.getParent(), rec.getFootprint())
                for rec in fpCat
            }
            replacers[filt] = NoiseReplacer(self.config.deblendReplacer,
                                            exposure=images[filt],
                                            footprints=footprints,
                                            exposureId=imageId)
        results = self.run(butler,
                           mergedDataId,
                           images,
                           ref,
                           moments,
                           imageId=imageId,
                           replacers=replacers)

    def run(self, butler, dataId, images, ref, moments, imageId, replacers):
        """Process coadds from all bands for a single patch.
        This method should not add or modify self.
        So far all children are using this exact code so leaving
        it here for now. If we specialize a lot, might make a
        processor its own object
        Parameters
        ----------
        images : `dict` of `lsst.afw.image.ExposureF`
            Coadd images and associated metadata, keyed by filter name.
        ref : `lsst.afw.table.SourceCatalog`
            A catalog with one record for each object, containing "best"
            measurements across all bands.
        replacers : `dict` of `lsst.meas.base.NoiseReplacer`, optional
            A dictionary of `~lsst.meas.base.NoiseReplacer` objects that can
            be used to insert and remove deblended pixels for each object.
            When not `None`, all detected pixels in ``images`` will have
            *already* been replaced with noise, and this *must* be used
            to restore objects one at a time.
        imageId : `int`
            Unique ID for this unit of data.  Should be used (possibly
            indirectly) to seed random numbers.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Struct with (at least) an `output` attribute that is a catalog
            to be written as ``self.config.output``.
        """

        if not (len(images) == len(self.config.filters) == len(moments)):
            self.log.info(
                'Number of images/moments does not match the list of filters.  Skipping'
            )
            return None

        covList = self.getCovariances()

        if self.config.zFile:
            zFile = Table.read(self.config.zFile)

            z_redshift = zFile[self.config.zField]
            # z_id = zFile[self.config.zId]

            z_catalog = SkyCoord(ra=zFile[self.config.zRa] * u.deg,
                                 dec=zFile[self.config.zDec] * u.deg)
            self.z_list = {}
            # self.z_id_list = {}

            coord = SkyCoord(ra=ref['coord_ra'] * u.rad,
                             dec=ref['coord_dec'] * u.rad)

            idx, d2d, d3d = match_coordinates_sky(coord, z_catalog)
            d2d = d2d.arcsec

        # we only need to compute the templates the first time through the loop
        first = True
        self.templates = []
        for cov_even, cov_odd, label, varMin, varMax in covList:
            tm0 = time.time()
            nproc = 0

            if (label not in self.config.useLabels) and len(
                    self.config.useLabels) > 0:
                self.log.info("Label %s not in %s" %
                              (label, self.config.useLabels))
                continue

            # Build full label
            full_label = label + self.config.label
            if self.config.selectionOnly:
                full_label += '_selection'

            if self.config.noiseBin is not None:
                full_label += '_n%d' % self.config.noiseBin

            if self.config.zBin is not None:
                full_label += '_z%d' % (self.config.zBin)

            self.log.info('Processing label %s' % label)
            sigmaFlux = np.sqrt(cov_even[0, 0])

            minFlux = self.config.snMin * sigmaFlux
            if self.config.fluxMin is not None:
                minFlux = self.config.fluxMin
            elif self.config.magMin is not None:

                # Current assumption is that coadd zeropoint is 27, true for HSC
                minFlux = 10**(-0.4 * (self.config.magMin - 27))

            maxFlux = self.config.snMax * sigmaFlux
            if self.config.fluxMax is not None:
                maxFlux = self.config.fluxMax
            elif self.config.magMax is not None:
                maxFlux = 10**(-0.4 * (self.config.magMax - 27))
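                # e.g. magMax = 25 with zeropoint 27 gives
                # maxFlux = 10**(-0.4 * (25 - 27)) ~= 6.31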
            self.log.info('Min/Max %0.2f/%0.2f', minFlux, maxFlux)

            covMat = self.bfd.MomentCov(cov_even, cov_odd)
            momentPrior = self.bfd.KDTreePrior(
                minFlux, maxFlux, covMat, self.ud, self.config.nSample,
                self.config.selectionOnly, self.config.noiseFactor,
                self.config.priorSigmaStep, self.config.priorSigmaCutoff,
                self.config.priorSigmaBuffer, self.config.invariantCovariance)

            if first:
                last_index = self.config.num_to_process
                if last_index is None:
                    last_index = len(ref)

                for n, (refRecord, moment) in enumerate(
                        zip(ref[:last_index], moments[:last_index])):

                    self.log.info(f'Processing {n}/{last_index}')
                    if refRecord.get('deblend_nChild') != 0:
                        continue

                    if moment.get('bfd_flag'):
                        continue

                    if self.config.zFile:
                        if d2d[n] < self.config.zDistCut:
                            match_z = z_redshift[idx[n]]
                            # id_z = z_id[idx]
                        else:
                            self.log.info('No matching redshift')
                            continue

                        if match_z < self.config.zMinCut or match_z > self.config.zMaxCut:
                            self.log.info(
                                f'Does not match redshift range {match_z:0.2}')
                            continue
                        self.z_list[refRecord.getId()] = match_z

                    cov = moment.get(f'bfd_cov_even_{self.config.filters[0]}')
                    if cov[0] > self.config.maxVar or cov[0] < self.config.minVar:
                        continue

                    # Insert the deblended pixels for just this object into all images.
                    for r in replacers.values():
                        r.insertSource(refRecord.getId())

                    try:
                        kgals = self.buildKGalaxy(refRecord, images)
                        kc = KColorGalaxy(self.bfd,
                                          kgals,
                                          id=refRecord.getId())

                    except Exception as e:
                        self.log.info('Failed to build k-space galaxy: %s', e)
                        continue

                    dx, badcentering, msg = kc.recenter(
                        self.config.weight_sigma)

                    if badcentering:
                        self.log.info('Bad centering %s', msg)
                        continue

                    template = kc.get_template(dx[0], dx[1])
                    self.templates.append(template)
                    nproc += 1

                    # Remove the deblended pixels for this object so we can process the next one.
                    for r in replacers.values():
                        r.removeSource(refRecord.getId())
                first = False

            for temp in self.templates:
                momentPrior.addTemplate(temp)

            tm = time.time() - tm0
            if nproc > 0:
                self.log.info(f'added {nproc} templates')
                self.log.info(f'time: {tm/60.0} min')
                self.log.info(f'time per: {tm/nproc} sec')

            outputCat = self.buildCatalog(momentPrior)

            dataId['label'] = full_label
            dataId['filter'] = self.config.filters[0]

            metadata = dafBase.PropertyList()
            metadata.set('cov_even', np.array(cov_even.flatten(), dtype=float))
            metadata.set('cov_odd', np.array(cov_odd.flatten(), dtype=float))
            metadata.set('fluxMin', minFlux)
            metadata.set('fluxMax', maxFlux)
            metadata.set('varMin', varMin)
            metadata.set('varMax', varMax)
            if self.config.zFile is not None:
                metadata.set('zFile', self.config.zFile)
                metadata.set('zField', self.config.zField)
            if self.config.zMaxCut is not None:
                metadata.set('zMaxCut', self.config.zMaxCut)
            if self.config.zMinCut is not None:
                metadata.set('zMinCut', self.config.zMinCut)
            if self.config.noiseBin is not None:
                metadata.set('noiseBin', self.config.noiseBin)
            metadata.set('noiseFactor', self.config.noiseFactor)
            metadata.set('priorSigmaCutoff', self.config.priorSigmaCutoff)
            metadata.set('priorSigmaStep', self.config.priorSigmaStep)
            metadata.set('priorSigmaBuffer', self.config.priorSigmaBuffer)
            metadata.set('nsample', self.config.nSample)
            metadata.set('selectionOnly', self.config.selectionOnly)
            metadata.set('invariantCovariance',
                         self.config.invariantCovariance)
            metadata.set('maxXY', self.config.maxXY)
            metadata.set('sigma', self.config.weight_sigma)
            metadata.set('wIndex', self.config.weight_n)
            metadata.set('covFile', self.config.covFile)

            outputCat.getTable().setMetadata(metadata)

            butler.put(outputCat, self.config.output.name, dataId)

        # Restore all original pixels in the images.
        if replacers is not None:
            for r in replacers.values():
                r.end()

    def buildCatalog(self, momentPrior):

        outCat = afwTable.BaseCatalog(self.schema)

        for temp in momentPrior.templates:
            rec = outCat.addNew()
            rec.set(self.mKey, temp.m)
            rec.set(self.dmKey, temp.dm.flatten())
            rec.set(self.dxyKey, temp.dxy.flatten())
            rec.set(self.ndaKey, temp.nda)
            rec.set(self.idKey, temp.id)
            if self.config.zFile:
                rec.set(self.zKey, self.z_list[temp.id])
                # rec.set(self.zIdKey, self.z_id_list[temp.id])

        return outCat

    def buildKGalaxy(self, record, exposures):

        center = record.getCentroid()
        band = self.config.filters[0]
        local_lin_wcs = exposures[band].getWcs().linearizePixelToSky(
            center, afwGeom.arcseconds)

        jacobian = local_lin_wcs.getLinear().getMatrix()
        sky_pos = exposures[band].getWcs().pixelToSky(center)
        uvref = (sky_pos.getRa().asArcseconds(),
                 sky_pos.getDec().asArcseconds())

        box = record.getFootprint().getBBox()
        xy_pos = (center.getX() - box.getMinX(), center.getY() - box.getMinY())

        bfd_wcs = bfd.WCS(jacobian, xyref=xy_pos, uvref=uvref)

        kgals = []
        for band in self.config.filters:
            exposure = exposures[band]
            factor = exposure.getMetadata().get('variance_scale')
            noise = np.sqrt(np.median(exposure.variance[box].array) / factor)
            image = exposure.image[box].array

            psf_image = exposure.getPsf().computeKernelImage(center).array

            kdata = bfd.generalImage(image,
                                     uvref,
                                     psf_image,
                                     wcs=bfd_wcs,
                                     pixel_noise=noise,
                                     size=self.config.grid_size)
            conjugate = set(np.where(np.logical_not(kdata.conjugate.flatten()))[0])
            kgal = self.bfd.KGalaxy(self.weight, kdata.kval.flatten(),
                                    kdata.kx.flatten(), kdata.ky.flatten(),
                                    kdata.kvar.flatten(), kdata.d2k, conjugate)
            kgals.append(kgal)
        return kgals

    def selection(self, ref):
        childName = 'deblend_nChild'
        if ref.getParent() == 0 and ref.get(childName) > 0:
            return False
        return True
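
The nested loops in getCovariances above rebuild full symmetric matrices from a packed upper triangle; below is a minimal numpy sketch of the same idea with an illustrative 3x3 example (names here are not part of the task):

    import numpy as np

    def unpack_symmetric(packed, n):
        """Rebuild an n x n symmetric matrix from its packed upper triangle."""
        full = np.zeros((n, n), dtype=np.float32)
        iu = np.triu_indices(n)  # row-major upper-triangle indices, matching the loops above
        full[iu] = packed        # fill the upper triangle
        full.T[iu] = packed      # mirror it into the lower triangle
        return full

    packed = np.array([1.0, 0.1, 0.2, 2.0, 0.3, 3.0], dtype=np.float32)
    cov = unpack_symmetric(packed, 3)
    assert np.allclose(cov, cov.T)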
Exemplo n.º 25
0
def getFakeSources(butler,
                   dataId,
                   tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False,
                   footprints=False,
                   radecMatch=None,
                   multiband=False,
                   reffMatch=False,
                   pix=0.168,
                   minRad=None,
                   raCol='RA',
                   decCol='Dec'):
    """
    Get list of sources which agree in pixel position with fake ones within tol.

    This returns a sourceCatalog of all the matched fake objects.
    Note that there will be duplicates in this list, since deblend.nchild
    is not checked and only a tolerance match is done, which could
    include extra sources.

    The outputs can include extraCols as long as they are one of:
        zeropoint, visit, ccd, thetaNorth, pixelScale

    If includeMissing is true, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources
    without any measurements; specifically, the 'id' column will be 0.

    radecMatch is the fakes table.  If it is not None (the default is None),
    an RA/Dec match is done with the input catalog instead of looking in the
    header for where the sources were added.
    """
    coaddData = "deepCoadd_calexp"
    coaddMeta = "deepCoadd_calexp_md"

    availExtras = {
        'zeropoint': {
            'type': float,
            'doc': 'zeropoint'
        },
        'visit': {
            'type': int,
            'doc': 'visit id'
        },
        'ccd': {
            'type': int,
            'doc': 'ccd id'
        },
        'thetaNorth': {
            'type': lsst.afw.geom.Angle,
            'doc': 'angle to north'
        },
        'pixelScale': {
            'type': float,
            'doc': 'pixelscale in arcsec/pixel'
        }
    }

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src',
                                 dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            meas = butler.get('deepCoadd_meas',
                              dataId,
                              flags=NO_FOOTPRINT,
                              immediate=True)
            force = butler.get('deepCoadd_forced_src',
                               dataId,
                               flags=NO_FOOTPRINT,
                               immediate=True)
            sources = combineWithForce(meas, force)
            cal = butler.get(coaddData, dataId, immediate=True)
            cal_md = butler.get(coaddMeta, dataId, immediate=True)
    except RuntimeError:
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.getPixelScale().asArcseconds()
        # The code below finds the angle to north: first the mid pixel of the
        # calexp is found, then the sky coordinate at that point; linearizing
        # the sky-to-pixel transform there gives the matrix from which the
        # angle is computed.
        xMid = cal.getWidth() // 2
        yMid = cal.getHeight() // 2
        midPoint = lsst.afw.geom.Point2D(xMid, yMid)
        midCoord = wcs.pixelToSky(midPoint)
        northSkyToPixelMatrix = wcs.linearizeSkyToPixel(
            midCoord, lsst.afw.geom.degrees)
        northSkyToPixelMatrix = northSkyToPixelMatrix.getLinear()
        availExtras['thetaNorth']['value'] = (np.arctan2(
            *tuple(northSkyToPixelMatrix(lsst.afw.geom.Point2D(1.0, 0.0))))
                                              ) * lsst.afw.geom.radians

    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        zeropoint = 2.5 * np.log10(cal_md.getScalar('FLUXMAG0'))
        availExtras['zeropoint']['value'] = zeropoint

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        if minRad is not None:
            print("# The min matching radius is %4.1f pixel" % minRad)
        bbox = lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT))
        fakeXY, srcIndex, srcClose = getFakeMatchesRaDec(sources,
                                                         radecMatch,
                                                         bbox,
                                                         cal.getWcs(),
                                                         tol=tol,
                                                         reffMatch=reffMatch,
                                                         pix=pix,
                                                         minRad=minRad,
                                                         raCol=raCol,
                                                         decCol=decCol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId',
                       type=np.int32,
                       doc='id of fake source matched to position')
    newSchema.addField('nMatched',
                       type=np.int32,
                       doc='Number of matched objects')
    newSchema.addField('nPrimary',
                       type=np.int32,
                       doc='Number of unique matched objects')
    newSchema.addField('nNoChild',
                       type=np.int32,
                       doc='Number of matched objects with nchild==0')
    newSchema.addField('rMatched',
                       type=float,
                       doc='Radius used for matching objects, in pixels')
    newSchema.addField('fakeOffX',
                       type=float,
                       doc='offset from input fake position in X (pixels)')
    newSchema.addField('fakeOffY',
                       type=float,
                       doc='offset from input fake position in Y (pixels)')
    newSchema.addField('fakeOffR',
                       type=float,
                       doc='offset from input fake position in radius')
    newSchema.addField('fakeClosest',
                       type="Flag",
                       doc='Is this match the closest one?')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName,
                           type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(
        sum([len(s) for s in srcIndex.values()]) +
        (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.getCentroidKey()
    isPrimary = sources.schema.find('detect_isPrimary').getKey()
    nChild = sources.schema.find('force_deblend_nChild').getKey()
    for ident, sindlist in srcIndex.items():
        rMatched = fakeXY[ident][2]
        if minRad is not None:
            if rMatched < minRad:
                rMatched = minRad
        nMatched = len(sindlist)
        nPrimary = np.sum(
            [sources[int(obj)].get(isPrimary) for obj in sindlist])
        nNoChild = np.sum([(sources[int(obj)].get(nChild) == 0)
                           for obj in sindlist])
        if includeMissing and (nMatched == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
            newRec.set('nMatched', 0)
            newRec.set('rMatched', rMatched)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[int(ss)], mapper)
            newRec.set('fakeId', ident)
            newRec.set('nMatched', nMatched)
            newRec.set('nPrimary', nPrimary)
            newRec.set('nNoChild', nNoChild)
            newRec.set('rMatched', rMatched)
            offsetX = (sources[int(ss)].get(centroidKey).getX() -
                       fakeXY[ident][0])
            newRec.set('fakeOffX', offsetX)
            offsetY = (sources[int(ss)].get(centroidKey).getY() -
                       fakeXY[ident][1])
            newRec.set('fakeOffY', offsetY)
            newRec.set('fakeOffR', np.sqrt(offsetX**2.0 + offsetY**2.0))
            if radecMatch:
                if int(ss) == int(srcClose[ident]):
                    newRec.set('fakeClosest', True)
                else:
                    newRec.set('fakeClosest', False)

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList
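
A minimal usage sketch for the routine above; the repository path, data ID, and fakes catalog are hypothetical, and the RA/Dec column names follow the defaults:

    import lsst.daf.persistence as dafPersist
    from astropy.table import Table

    butler = dafPersist.Butler('/path/to/rerun')  # hypothetical repository
    fakes = Table.read('fakes.fits')              # hypothetical fakes table with 'RA'/'Dec' columns
    dataId = {'tract': 0, 'patch': '1,1', 'filter': 'HSC-I'}

    matched = getFakeSources(butler, dataId, tol=2.0, radecMatch=fakes,
                             includeMissing=True)
    if matched is not None:
        print('%d rows in matched catalog' % len(matched))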
Exemplo n.º 26
0
def getFakeSources(butler,
                   dataId,
                   tol=1.0,
                   extraCols=('zeropoint', 'visit', 'ccd'),
                   includeMissing=False,
                   footprints=False,
                   radecMatch=None):
    """Get list of sources which agree in pixel position with fake ones with tol
    
    this returns a sourceCatalog of all the matched fake objects,
    note, there will be duplicates in this list, since I haven't checked deblend.nchild,
    and I'm only doing a tolerance match, which could include extra sources
    
    the outputs can include extraCols as long as they are one of:
      zeropoint, visit, ccd, thetaNorth, pixelScale

    if includeMissing is true, then the pipeline looks at the fake sources
    added in the header and includes an entry in the table for sources without
    any measurements, specifically the 'id' column will be 0

    radecMatch is the fakes table. if it's not None(default), then do an ra/dec 
    match with the input catalog instead of looking in the header for where the 
    sources where added
    """

    availExtras = {
        'zeropoint': {
            'type': float,
            'doc': 'zeropoint'
        },
        'visit': {
            'type': int,
            'doc': 'visit id'
        },
        'ccd': {
            'type': int,
            'doc': 'ccd id'
        },
        'thetaNorth': {
            'type': lsst.afw.geom.Angle,
            'doc': 'angle to north'
        },
        'pixelScale': {
            'type': float,
            'doc': 'pixelscale in arcsec/pixel'
        }
    }

    if not np.in1d(extraCols, list(availExtras.keys())).all():
        print("extraCols must be in ", availExtras)

    try:
        if 'filter' not in dataId:
            sources = butler.get('src',
                                 dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('calexp', dataId, immediate=True)
            cal_md = butler.get('calexp_md', dataId, immediate=True)
        else:
            sources = butler.get('deepCoadd_src',
                                 dataId,
                                 flags=lsst.afw.table.SOURCE_IO_NO_FOOTPRINTS,
                                 immediate=True)
            cal = butler.get('deepCoadd', dataId, immediate=True)
            cal_md = butler.get('deepCoadd_md', dataId, immediate=True)
    except (lsst.pex.exceptions.LsstException, RuntimeError):
        print("skipping", dataId)
        return None

    if ('pixelScale' in extraCols) or ('thetaNorth' in extraCols):
        wcs = cal.getWcs()
        availExtras['pixelScale']['value'] = wcs.pixelScale().asArcseconds()
        availExtras['thetaNorth']['value'] = lsst.afw.geom.Angle(
            np.arctan2(*tuple(wcs.getLinearTransform().invert()(
                lsst.afw.geom.Point2D(1.0, 0.0)))))
    if 'visit' in extraCols:
        availExtras['visit']['value'] = dataId['visit']
    if 'ccd' in extraCols:
        availExtras['ccd']['value'] = dataId['ccd']
    if 'zeropoint' in extraCols:
        availExtras['zeropoint']['value'] = 2.5 * np.log10(
            cal_md.get('FLUXMAG0'))

    if radecMatch is None:
        fakeXY, srcIndex = getFakeMatchesHeader(cal_md, sources, tol=tol)
    else:
        fakeXY, srcIndex = getFakeMatchesRaDec(
            sources,
            radecMatch,
            lsst.afw.geom.Box2D(cal.getBBox(lsst.afw.image.PARENT)),
            cal.getWcs(),
            tol=tol)

    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId',
                       type=int,
                       doc='id of fake source matched to position')
    newSchema.addField('fakeOffset',
                       type=lsst.afw.geom.Point2D,
                       doc='offset from input fake position (pixels)')

    for extraName in set(extraCols).intersection(availExtras):
        newSchema.addField(extraName,
                           type=availExtras[extraName]['type'],
                           doc=availExtras[extraName]['doc'])

    srcList = SourceCatalog(newSchema)
    srcList.reserve(
        sum([len(s) for s in srcIndex.values()]) +
        (0 if not includeMissing else list(srcIndex.values()).count([])))

    centroidKey = sources.schema.find('centroid.sdss').getKey()
    for ident, sindlist in srcIndex.items():
        if includeMissing and (len(sindlist) == 0):
            newRec = srcList.addNew()
            newRec.set('fakeId', ident)
            newRec.set('id', 0)
        for ss in sindlist:
            newRec = srcList.addNew()
            newRec.assign(sources[ss], mapper)
            newRec.set('fakeId', ident)
            newRec.set(
                'fakeOffset',
                lsst.afw.geom.Point2D(
                    sources[ss].get(centroidKey).getX() - fakeXY[ident][0],
                    sources[ss].get(centroidKey).getY() - fakeXY[ident][1]))

    if includeMissing:
        srcList = srcList.copy(deep=True)

    for extraName in set(extraCols).intersection(availExtras):
        tempCol = srcList.get(extraName)
        tempCol.fill(availExtras[extraName]['value'])

    return srcList