def get_fluxes(match):
    """Return (flux, ref_flux) or None if either is invalid."""
    # `calib`, `refcalib`, `ref_flux_key`, and `filt` are assumed to come from
    # the enclosing scope; `collections` and `fluxFromABMag` from module imports.
    # NOTE: Protect against negative fluxes: ignore this match if we find one.
    flux = match[1]['slot_CalibFlux_flux']
    if flux < 0:
        return None
    else:
        # convert to magnitudes and then Janskys, for a usable flux.
        flux = fluxFromABMag(calib.getMagnitude(flux))

    # NOTE: Have to protect against negative reference fluxes too.
    if 'slot' in ref_flux_key:
        ref_flux = match[0][ref_flux_key]
        if ref_flux < 0:
            return None
        else:
            ref_flux = fluxFromABMag(refcalib.getMagnitude(ref_flux))
    else:
        # a.net fluxes are already in Janskys.
        ref_flux = match[0][ref_flux_key.format(filt)]
        if ref_flux < 0:
            return None

    Flux = collections.namedtuple('Flux', ('flux', 'ref_flux'))
    return Flux(flux, ref_flux)

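# A hypothetical usage sketch (not from the original source): `matches` is assumed
# to be the reference/source match list that get_fluxes() above expects, with the
# same enclosing-scope variables (calib, refcalib, ref_flux_key, filt) defined.
# It simply collects the valid (flux, ref_flux) pairs, skipping the None returns.
fluxes = [f for f in (get_fluxes(m) for m in matches) if f is not None]
measured = [f.flux for f in fluxes]       # fluxes in Jy
reference = [f.ref_flux for f in fluxes]  # reference fluxes in Jy
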
def make_source_catalog_from_astropy_table(out_table, debug=False):
    """Return an AFW SourceCatalog from an Astropy Table.

    Written with extensive reference to
    https://github.com/lsst/meas_astrom/blob/master/convertToFitsTable.py
    """
    filters = ('J', 'H', 'K')
    schema = makeMinimalSchema(filters=filters, debug=debug)
    out_cat = afwTable.SourceCatalog(schema)
    for row in out_table:
        record = out_cat.addNew()
        record.setId(twomass_int_id(row['2MASSID']))
        record.setRa(float(row['coord_ra']) * lsst.afw.geom.degrees)
        record.setDec(float(row['coord_dec']) * lsst.afw.geom.degrees)
        for filt in filters:
            filtMag = '%s_mag' % filt
            filtMagSigma = '%s_mag_sigma' % filt
            filtFlux = '%s_flux' % filt
            filtFluxSigma = '%s_fluxSigma' % filt
            abMag = vegaToABMag(row[filtMag], filt)
            # error remains unchanged
            record.set(filtFlux, fluxFromABMag(abMag))
            record.set(filtFluxSigma,
                       fluxErrFromABMagErr(row[filtMagSigma], abMag))
    return out_cat

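# A hypothetical usage sketch (not from the original source): the column names
# ('2MASSID', 'coord_ra', 'coord_dec', '<band>_mag', '<band>_mag_sigma') are taken
# from the function body above; the ID string and magnitude values are illustrative
# only, and twomass_int_id/vegaToABMag/makeMinimalSchema are assumed to be the
# helpers defined alongside make_source_catalog_from_astropy_table.
from astropy.table import Table

out_table = Table(
    rows=[('00034301-7717266', 10.5, -3.2, 15.1, 0.03, 14.8, 0.04, 14.6, 0.05)],
    names=('2MASSID', 'coord_ra', 'coord_dec',
           'J_mag', 'J_mag_sigma', 'H_mag', 'H_mag_sigma', 'K_mag', 'K_mag_sigma'))
out_cat = make_source_catalog_from_astropy_table(out_table)
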
def plot_point_mags(output_data, visits, outfile, dataId):
    # get a butler
    butler = dp.Butler(output_data)

    # The following value for refcatId is "mandatory, but meaningless",
    # so we won't try to generalize it.
    refcatId = {'tract': 0, 'patch': '0,0'}
    ref = butler.get('deepCoadd_ref', dataId=refcatId)

    # visits to use for lightcurves
    visit_list = [int(x) for x in visits.split('^')]

    # get the sources and calib objects for each single epoch visit
    forced_srcs = {}
    calibs = {}
    for visit in visit_list:
        dataId['visit'] = visit
        forced_srcs[visit] = butler.get('forced_src', dataId=dataId)
        calexp = butler.get('calexp', dataId=dataId)
        calibs[visit] = calexp.getCalib()
        del calexp

    # initialize dictionaries to hold lightcurve arrays. Get
    # extendedness from the coadd catalog.
    lightcurve_fluxes = {}
    extendedness = {}
    for idx, ext in zip(ref.get('id'),
                        ref.get('base_ClassificationExtendedness_value')):
        lightcurve_fluxes[idx] = []
        extendedness[idx] = ext

    # pivot the source tables to assemble lightcurves
    for visit, forced_src in forced_srcs.iteritems():
        calib = calibs[visit]
        for idx, flux in zip(forced_src.get('objectId'),
                             forced_src.get('base_PsfFlux_flux')):
            if extendedness[idx] > 0.5:
                continue
            if flux <= 0.:
                continue
            lightcurve_fluxes[idx].append(
                afw_image.fluxFromABMag(calib.getMagnitude(flux)))

    # compute aggregate quantities for each object and plot
    for lightcurve in lightcurve_fluxes.values():
        if len(lightcurve) == len(visit_list):
            plt.scatter(afw_image.abMagFromFlux(numpy.median(lightcurve)),
                        numpy.std(lightcurve)/numpy.median(lightcurve),
                        alpha=0.5)
    mags, invsnrs = make_invsnr_arr()
    plt.plot(mags, invsnrs, color='red', linewidth=2, alpha=0.75)
    plt.xlabel("Calibrated magnitude of median flux")
    plt.ylabel("stdev(flux)/median(flux)")
    plt.xlim(15.5, 25)
    plt.ylim(0., 0.5)
    plt.savefig(outfile)

def _set_mags(self, record, row, key_map):
    """!Set the flux records from the input magnitudes

    @param[in,out] record  SourceCatalog record to modify
    @param[in] row         dict like object containing magnitude values
    @param[in] key_map     Map of catalog keys to use in filling the record
    """
    for item in self.config.mag_column_list:
        record.set(key_map[item + "_flux"], fluxFromABMag(row[item]))
    if len(self.config.mag_err_column_map) > 0:
        for err_key in self.config.mag_err_column_map.keys():
            error_col_name = self.config.mag_err_column_map[err_key]
            record.set(key_map[err_key + "_fluxSigma"],
                       fluxErrFromABMagErr(row[error_col_name], row[err_key]))

def testBasics(self):
    for flux in (1, 210, 3210, 43210, 543210):
        abMag = afwImage.abMagFromFlux(flux)
        self.assertAlmostEqual(abMag, refABMagFromFlux(flux))
        fluxRoundTrip = afwImage.fluxFromABMag(abMag)
        self.assertAlmostEqual(flux, fluxRoundTrip)
        for fluxErrFrac in (0.001, 0.01, 0.1):
            fluxErr = flux * fluxErrFrac
            abMagErr = afwImage.abMagErrFromFluxErr(fluxErr, flux)
            self.assertAlmostEqual(abMagErr, refABMagErrFromFluxErr(fluxErr, flux))
            fluxErrRoundTrip = afwImage.fluxErrFromABMagErr(abMagErr, abMag)
            self.assertAlmostEqual(fluxErr, fluxErrRoundTrip)

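# For reference, a minimal pure-Python sketch of the conversions these round-trip
# tests exercise, assuming the pre-nJy afw convention of fluxes in janskys with an
# AB zero point of 3631 Jy (see the "fluxes in Jy" comments in the fgcmcal code
# below); the afw implementations remain the authoritative definitions.
import math


def sketch_abMagFromFlux(flux):
    """AB magnitude for a flux in Jy."""
    return -2.5 * math.log10(flux / 3631.0)


def sketch_fluxFromABMag(mag):
    """Flux in Jy for an AB magnitude."""
    return 3631.0 * 10.0**(-0.4 * mag)


def sketch_fluxErrFromABMagErr(magErr, mag):
    """First-order propagation of a magnitude error to a flux error."""
    return abs(-0.4 * math.log(10.0) * sketch_fluxFromABMag(mag) * magErr)
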
def _set_mags(self, record, row, key_map):
    """!Set the flux records from the input magnitudes

    @param[in,out] record  SourceCatalog record to modify
    @param[in] row         dict like object containing magnitude values
    @param[in] key_map     Map of catalog keys to use in filling the record
    """
    for item in self.config.mag_column_list:
        record.set(key_map[item + '_flux'], fluxFromABMag(row[item]))
    if len(self.config.mag_err_column_map) > 0:
        for err_key in self.config.mag_err_column_map.keys():
            error_col_name = self.config.mag_err_column_map[err_key]
            record.set(
                key_map[err_key + '_fluxSigma'],
                fluxErrFromABMagErr(row[error_col_name], row[err_key]))

def testVector(self):
    flux = np.array([1.0, 210.0, 3210.0, 43210.0, 543210.0])
    flux.flags.writeable = False  # Put the 'const' into ndarray::Array<double const, 1, 0>
    abMag = afwImage.abMagFromFlux(flux)
    self.assertFloatsAlmostEqual(abMag, refABMagFromFlux(flux))
    fluxRoundTrip = afwImage.fluxFromABMag(abMag)
    self.assertFloatsAlmostEqual(flux, fluxRoundTrip, rtol=1.0e-15)
    for fluxErrFrac in (0.001, 0.01, 0.1):
        fluxErr = flux * fluxErrFrac
        abMagErr = afwImage.abMagErrFromFluxErr(fluxErr, flux)
        self.assertFloatsAlmostEqual(abMagErr, refABMagErrFromFluxErr(fluxErr, flux))
        fluxErrRoundTrip = afwImage.fluxErrFromABMagErr(abMagErr, abMag)
        self.assertFloatsAlmostEqual(fluxErr, fluxErrRoundTrip, rtol=1.0e-15)

def _formatCatalog(self, fgcmStarCat, offsets):
    """
    Turn an FGCM-formatted star catalog into a reference catalog, applying
    zeropoint offsets.

    Parameters
    ----------
    fgcmStarCat: `afwTable.SimpleCatalog`
        SimpleCatalog as output by fgcmcal
    offsets: `list` with len(self.bands) entries
        Zeropoint offsets to apply

    Returns
    -------
    formattedCat: `afwTable.SimpleCatalog`
        SimpleCatalog suitable for using as a reference catalog
    """
    sourceMapper = afwTable.SchemaMapper(fgcmStarCat.schema)
    minSchema = LoadIndexedReferenceObjectsTask.makeMinimalSchema(
        self.bands, addCentroid=False, addIsResolved=True, coordErrDim=0)
    sourceMapper.addMinimalSchema(minSchema)
    for band in self.bands:
        sourceMapper.editOutputSchema().addField('%s_nGood' % (band),
                                                 type=np.int32)

    formattedCat = afwTable.SimpleCatalog(sourceMapper.getOutputSchema())
    formattedCat.reserve(len(fgcmStarCat))
    formattedCat.extend(fgcmStarCat, mapper=sourceMapper)

    # Note that we don't have to set `resolved` because the default is False
    for b, band in enumerate(self.bands):
        mag = fgcmStarCat['mag_std_noabs'][:, b] + offsets[b]
        # We want fluxes in Jy from calibrated AB magnitudes
        # (after applying offset)
        # TODO: Full implementation of RFC-549 will have all reference
        # catalogs in nJy instead of Jy.
        flux = afwImage.fluxFromABMag(mag)
        fluxErr = afwImage.fluxErrFromABMagErr(
            fgcmStarCat['magErr_std'][:, b], mag)
        formattedCat['%s_flux' % (band)][:] = flux
        formattedCat['%s_fluxErr' % (band)][:] = fluxErr
        formattedCat['%s_nGood' % (band)][:] = fgcmStarCat['ngood'][:, b]

    return formattedCat

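# A small consistency check (illustrative only, not part of the original code):
# adding the per-band zeropoint offset in magnitudes before converting, as done
# above, is equivalent to scaling the flux by 10**(-0.4 * offset); the RFC-549
# TODO only changes the unit scale (1 Jy == 1e9 nJy).
import numpy as np
import lsst.afw.image as afwImage

mag = np.array([18.0, 20.0])
offset = 0.01
np.testing.assert_allclose(
    afwImage.fluxFromABMag(mag + offset),
    afwImage.fluxFromABMag(mag) * 10.0**(-0.4 * offset))
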
def _setFlux(self, record, row, key_map):
    """Set flux fields in a record of an indexed catalog.

    Parameters
    ----------
    record : `lsst.afw.table.SimpleRecord`
        Row from indexed catalog to modify.
    row : structured `numpy.array`
        Row from catalog being ingested.
    key_map : `dict` mapping `str` to `lsst.afw.table.Key`
        Map of catalog keys.
    """
    for item in self.config.mag_column_list:
        record.set(key_map[item + '_flux'], fluxFromABMag(row[item]))
    if len(self.config.mag_err_column_map) > 0:
        for err_key in self.config.mag_err_column_map.keys():
            error_col_name = self.config.mag_err_column_map[err_key]
            record.set(
                key_map[err_key + '_fluxErr'],
                fluxErrFromABMagErr(row[error_col_name], row[err_key]))

def _formatCatalog(self, fgcmStarCat, offsets):
    """
    Turn an FGCM-formatted star catalog into a reference catalog, applying
    zeropoint offsets.

    Parameters
    ----------
    fgcmStarCat: SimpleCatalog
        SimpleCatalog as output by fgcmcal
    offsets: list with len(self.bands) entries
        Zeropoint offsets to apply

    Returns
    -------
    formattedCat: SimpleCatalog
        SimpleCatalog suitable for use as a reference catalog (ref_cat)
    """
    sourceMapper = afwTable.SchemaMapper(fgcmStarCat.schema)
    sourceMapper.addMinimalSchema(afwTable.SimpleTable.makeMinimalSchema())
    for band in self.bands:
        sourceMapper.editOutputSchema().addField('%s_flux' % (band),
                                                 type=np.float64)
        sourceMapper.editOutputSchema().addField('%s_fluxErr' % (band),
                                                 type=np.float64)
        sourceMapper.editOutputSchema().addField('%s_nGood' % (band),
                                                 type=np.float64)

    formattedCat = afwTable.SimpleCatalog(sourceMapper.getOutputSchema())
    formattedCat.reserve(len(fgcmStarCat))
    formattedCat.extend(fgcmStarCat, mapper=sourceMapper)

    for b, band in enumerate(self.bands):
        mag = fgcmStarCat['mag_std_noabs'][:, b] + offsets[b]
        flux = afwImage.fluxFromABMag(mag)
        fluxErr = afwImage.fluxErrFromABMagErr(
            fgcmStarCat['magerr_std'][:, b], mag)
        formattedCat['%s_flux' % (band)][:] = flux
        formattedCat['%s_fluxErr' % (band)][:] = fluxErr
        formattedCat['%s_nGood' % (band)][:] = fgcmStarCat['ngood'][:, b]

    return formattedCat

def plot_point_mags(output_data, visit_list, dataId, minMag=17, mid_cut=20,
                    maxMag=26, fit_curves=True):
    # get a butler
    butler = dp.Butler(output_data)

    # The following value for refcatId is "mandatory, but meaningless",
    # so we won't try to generalize it.
    refcatId = {'tract': 0, 'patch': '0,0'}
    ref = butler.get('deepCoadd_ref', dataId=refcatId)

    # get the sources and calib objects for each single epoch visit
    forced_srcs = {}
    calibs = {}
    for visit in visit_list:
        dataId['visit'] = visit
        try:
            my_forced_srcs = butler.get('forced_src', dataId=dataId)
            calexp = butler.get('calexp', dataId=dataId)
            my_calibs = calexp.getCalib()
            del calexp
            forced_srcs[visit] = my_forced_srcs
            calibs[visit] = my_calibs
        except FitsError as eobj:
            print(eobj)

    # initialize dictionaries to hold lightcurve arrays. Get
    # extendedness from the coadd catalog.
    lightcurve_fluxes = {}
    extendedness = {}
    for idx, ext in zip(ref.get('id'),
                        ref.get('base_ClassificationExtendedness_value')):
        lightcurve_fluxes[idx] = []
        extendedness[idx] = ext

    # pivot the source tables to assemble lightcurves
    for visit, forced_src in forced_srcs.items():
        calib = calibs[visit]
        for idx, flux in zip(forced_src.get('objectId'),
                             forced_src.get('base_PsfFlux_flux')):
            if extendedness[idx] > 0.5:
                continue
            if flux <= 0.:
                continue
            lightcurve_fluxes[idx].append(
                afw_image.fluxFromABMag(calib.getMagnitude(flux)))

    # compute aggregate quantities for each object and plot
    band = dataId['filter']
    med_mags = []
    med_err = []
    for lightcurve in lightcurve_fluxes.values():
        if len(lightcurve) == len(visit_list):
            median_flux = np.median(lightcurve)
            med_mags.append(afw_image.abMagFromFlux(median_flux))
            med_err.append(np.std(lightcurve) / median_flux)
    print("number of objects: ", len(med_mags))

    mag_stats = MagStats(band, med_mags, med_err, fit_curves=fit_curves)
    if mag_stats.pcov is not None:
        label = 'filter=%s, Floor=%.1f%%, m_5=%0.2f' \
            % (band, mag_stats.sys_floor*100, mag_stats.popt[1])
    else:
        label = 'filter=%s, Floor=%.1f%%' % (band, mag_stats.sys_floor*100)
    scatter = plt.scatter(med_mags, med_err, alpha=0.3,
                          color=_filter_color[band],
                          marker=_filter_symbol[band], label=label)
    plt.xlabel("Calibrated magnitude of median flux")
    plt.ylabel("stdev(flux)/median(flux)")
    plt.xlim(15.5, 25)
    plt.ylim(0., 0.5)
    return scatter, mag_stats

def _computeReferenceOffsets(self, butler):
    """
    Compute offsets relative to a reference catalog.

    Parameters
    ----------
    butler: lsst.daf.persistence.Butler

    Returns
    -------
    offsets: np.array of floats
        Per band zeropoint offsets
    """
    # Load the stars
    stars = butler.get('fgcmStandardStars', fgcmcycle=self.useCycle)

    # Only use stars that are observed in all the bands
    minObs = stars['ngood'].min(axis=1)

    # Depending on how things work, this might need to be configurable
    # However, I think that best results will occur when we use the same
    # pixels to do the calibration for all the bands, so I think this
    # is the right choice.
    goodStars = (minObs >= 1)
    stars = stars[goodStars]

    self.log.info(
        "Found %d stars with at least 1 good observation in each band"
        % (len(stars)))

    # We have to make a table for each pixel with flux/fluxErr
    sourceMapper = afwTable.SchemaMapper(stars.schema)
    sourceMapper.addMinimalSchema(afwTable.SimpleTable.makeMinimalSchema())
    sourceMapper.editOutputSchema().addField('flux', type=np.float64,
                                             doc="flux")
    sourceMapper.editOutputSchema().addField('fluxErr', type=np.float64,
                                             doc="flux error")
    badStarKey = sourceMapper.editOutputSchema().addField('flag_badStar',
                                                          type='Flag',
                                                          doc="bad flag")

    # The exposure is used to record the filter name
    exposure = afwImage.ExposureF()

    # Split up the stars (units are radians)
    theta = np.pi / 2. - stars['coord_dec']
    phi = stars['coord_ra']

    ipring = hp.ang2pix(self.config.referencePixelizationNside, theta, phi)
    h, rev = esutil.stat.histogram(ipring, rev=True)

    gdpix, = np.where(h >= self.config.referencePixelizationMinStars)

    self.log.info(
        "Found %d pixels (nside=%d) with at least %d good stars"
        % (gdpix.size, self.config.referencePixelizationNside,
           self.config.referencePixelizationMinStars))

    if gdpix.size < self.config.referencePixelizationNPixels:
        self.log.warn(
            "Found fewer good pixels (%d) than preferred in configuration (%d)"
            % (gdpix.size, self.config.referencePixelizationNPixels))
    else:
        # Sample out the pixels we want to use
        gdpix = np.random.choice(
            gdpix, size=self.config.referencePixelizationNPixels, replace=False)

    results = np.zeros(gdpix.size, dtype=[('hpix', 'i4'),
                                          ('nstar', 'i4', len(self.bands)),
                                          ('nmatch', 'i4', len(self.bands)),
                                          ('zp', 'f4', len(self.bands)),
                                          ('zpErr', 'f4', len(self.bands))])
    results['hpix'] = ipring[rev[rev[gdpix]]]

    # We need a boolean index to deal with catalogs...
    selected = np.zeros(len(stars), dtype=np.bool)

    refFluxFields = [None] * len(self.bands)

    for p, pix in enumerate(gdpix):
        i1a = rev[rev[pix]:rev[pix + 1]]

        # Need to convert to a boolean array
        selected[:] = False
        selected[i1a] = True

        for b, band in enumerate(self.bands):
            sourceCat = afwTable.SimpleCatalog(sourceMapper.getOutputSchema())
            sourceCat.reserve(len(i1a))
            sourceCat.extend(stars[selected], mapper=sourceMapper)
            sourceCat['flux'] = afwImage.fluxFromABMag(
                stars['mag_std_noabs'][selected, b])
            sourceCat['fluxErr'] = afwImage.fluxErrFromABMagErr(
                stars['magerr_std'][selected, b],
                stars['mag_std_noabs'][selected, b])

            # Make sure we only use stars that have valid measurements
            # (This is perhaps redundant with requirements above that the
            # stars be observed in all bands, but it can't hurt)
            badStar = (stars['mag_std_noabs'][selected, b] > 90.0)
            for rec in sourceCat[badStar]:
                rec.set(badStarKey, True)

            exposure.setFilter(afwImage.Filter(band))

            if refFluxFields[b] is None:
                # Need to find the flux field in the reference catalog
                # to work around limitations of DirectMatch in PhotoCal
                ctr = stars[0].getCoord()
                rad = 0.05 * lsst.geom.degrees
                refDataTest = self.refObjLoader.loadSkyCircle(ctr, rad, band)
                refFluxFields[b] = refDataTest.fluxField

            # Make a copy of the config so that we can modify it
            calConfig = copy.copy(self.config.photoCal.value)
            calConfig.match.referenceSelection.signalToNoise.fluxField = \
                refFluxFields[b]
            calConfig.match.referenceSelection.signalToNoise.errField = \
                refFluxFields[b] + 'Err'
            calTask = self.config.photoCal.target(
                refObjLoader=self.refObjLoader, config=calConfig,
                schema=sourceCat.getSchema())

            struct = calTask.run(exposure, sourceCat)

            results['nstar'][p, b] = len(i1a)
            results['nmatch'][p, b] = len(struct.arrays.refMag)
            results['zp'][p, b] = struct.zp
            results['zpErr'][p, b] = struct.sigma

    # And compute the summary statistics
    offsets = np.zeros(len(self.bands))

    for b, band in enumerate(self.bands):
        # make configurable
        ok, = np.where(results['nmatch'][:, b] >= self.config.referenceMinMatch)
        offsets[b] = np.median(results['zp'][ok, b])
        madSigma = 1.4826 * np.median(np.abs(results['zp'][ok, b] - offsets[b]))

        self.log.info(
            "Reference catalog offset for %s band: %.6f +/- %.6f"
            % (band, offsets[b], madSigma))

    return offsets

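# A standalone sketch of the robust width estimate used in the summary statistics
# above (illustrative only): 1.4826 times the median absolute deviation
# approximates a Gaussian sigma.
import numpy as np


def mad_sigma(values):
    """Median-absolute-deviation estimate of a Gaussian-like sigma."""
    values = np.asarray(values)
    median = np.median(values)
    return 1.4826 * np.median(np.abs(values - median))
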
# initialize dictionaries to hold lightcurve arrays. Get extendedness
# from the coadd catalog.
lightcurve_fluxes = {}
extendedness = {}
for idx, ext in zip(ref.get('id'),
                    ref.get('base_ClassificationExtendedness_value')):
    lightcurve_fluxes[idx] = []
    extendedness[idx] = ext

# pivot the source tables to assemble lightcurves
for visit, forced_src in forced_srcs.iteritems():
    calib = calibs[visit]
    for idx, flux in zip(forced_src.get('objectId'),
                         forced_src.get('base_PsfFlux_flux')):
        if extendedness[idx] > 0.5:
            continue
        if flux <= 0.:
            continue
        lightcurve_fluxes[idx].append(
            afw_image.fluxFromABMag(calib.getMagnitude(flux)))

# compute aggregate quantities for each object and plot
for lightcurve in lightcurve_fluxes.values():
    if len(lightcurve) == 9:
        plt.scatter(afw_image.abMagFromFlux(numpy.median(lightcurve)),
                    numpy.std(lightcurve)/numpy.median(lightcurve),
                    alpha=0.5)
mags, invsnrs = make_invsnr_arr()
plt.plot(mags, invsnrs, color='red', linewidth=2, alpha=0.75)
plt.xlabel("Calibrated magnitude of median flux")
plt.ylabel("stdev(flux)/median(flux)")
plt.xlim(15.5, 25)
plt.ylim(0., 0.5)
plt.show()

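# make_invsnr_arr() is not shown in these snippets. As a loose illustration of the
# kind of overlay curve it could produce, here is the standard LSST photometric
# error model (Ivezic et al. 2008): sigma^2 = sigma_sys^2 + (0.04 - gamma)*x +
# gamma*x^2 with x = 10**(0.4*(m - m5)). The parameter values and functional form
# actually used by make_invsnr_arr may differ; everything here is an assumption.
import numpy as np


def model_invsnr_arr(m5=24.5, sigma_sys=0.005, gamma=0.039,
                     mag_min=16.0, mag_max=25.0, npts=200):
    """Return (mags, fractional flux errors) for an assumed error model."""
    mags = np.linspace(mag_min, mag_max, npts)
    x = 10.0**(0.4 * (mags - m5))
    sigma_rand_sq = (0.04 - gamma) * x + gamma * x**2
    sigma_mag = np.sqrt(sigma_sys**2 + sigma_rand_sq)
    # For small errors, sigma_mag ~ 1.0857 * (flux error / flux).
    return mags, sigma_mag / 1.0857
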
def _runFgcmOutputProducts(self, visitDataRefName, ccdDataRefName,
                           filterMapping, zpOffsets, testVisit, testCcd,
                           testFilter, testBandIndex):
    """
    Run FgcmOutputProductsTask and check the zeropoint offsets, reference
    catalog, jointcal_photoCalib, and atmosphere transmission outputs.
    """
    if self.logLevel is not None:
        self.otherArgs.extend(['--loglevel', 'fgcmcal=%s' % self.logLevel])

    args = [self.inputDir, '--output', self.testDir, '--doraise']
    args.extend(self.otherArgs)

    result = fgcmcal.FgcmOutputProductsTask.parseAndRun(
        args=args, config=self.config, doReturnResults=True)
    self._checkResult(result)

    # Extract the offsets from the results
    offsets = result.resultList[0].results.offsets

    self.assertFloatsAlmostEqual(offsets[0], zpOffsets[0], rtol=1e-6)
    self.assertFloatsAlmostEqual(offsets[1], zpOffsets[1], rtol=1e-6)

    butler = dafPersistence.butler.Butler(self.testDir)

    # Test the reference catalog stars

    # Read in the raw stars...
    rawStars = butler.get('fgcmStandardStars', fgcmcycle=0)

    # Read in the new reference catalog...
    config = LoadIndexedReferenceObjectsConfig()
    config.ref_dataset_name = 'fgcm_stars'
    task = LoadIndexedReferenceObjectsTask(butler, config=config)

    # Read in a giant radius to get them all
    refStruct = task.loadSkyCircle(rawStars[0].getCoord(),
                                   5.0 * lsst.geom.degrees, filterName='r')

    # Make sure all the stars are there
    self.assertEqual(len(rawStars), len(refStruct.refCat))

    # And make sure the numbers are consistent
    test, = np.where(rawStars['id'][0] == refStruct.refCat['id'])

    mag = rawStars['mag_std_noabs'][0, 0] + offsets[0]
    flux = afwImage.fluxFromABMag(mag)
    fluxErr = afwImage.fluxErrFromABMagErr(rawStars['magerr_std'][0, 0], mag)
    self.assertFloatsAlmostEqual(flux, refStruct.refCat['r_flux'][test[0]],
                                 rtol=1e-6)
    self.assertFloatsAlmostEqual(fluxErr, refStruct.refCat['r_fluxErr'][test[0]],
                                 rtol=1e-6)

    # Test the jointcal_photoCalib output

    zptCat = butler.get('fgcmZeropoints', fgcmcycle=0)
    selected = (zptCat['fgcmflag'] < 16)

    # Read in all the calibrations, these should all be there
    for rec in zptCat[selected]:
        testCal = butler.get('jointcal_photoCalib',
                             dataId={visitDataRefName: int(rec['visit']),
                                     ccdDataRefName: int(rec['ccd']),
                                     'filter': filterMapping[rec['filtername']],
                                     'tract': 0})

    # Our round-trip tests will be on this final one which is still loaded
    testCal = butler.get('jointcal_photoCalib',
                         dataId={visitDataRefName: int(testVisit),
                                 ccdDataRefName: int(testCcd),
                                 'filter': filterMapping[testFilter],
                                 'tract': 0})

    src = butler.get('src', dataId={visitDataRefName: int(testVisit),
                                    ccdDataRefName: int(testCcd)})

    # Only test sources with positive flux
    gdSrc = (src['slot_CalibFlux_flux'] > 0.0)

    # We need to apply the calibration offset to the fgcmzpt (which is internal
    # and doesn't know about that yet)
    testZpInd, = np.where((zptCat['visit'] == testVisit) &
                          (zptCat['ccd'] == testCcd))
    fgcmZpt = zptCat['fgcmzpt'][testZpInd] + offsets[testBandIndex]

    # This is the magnitude through the mean calibration
    photoCalMeanCalMags = np.zeros(gdSrc.sum())
    # This is the magnitude through the full focal-plane variable mags
    photoCalMags = np.zeros_like(photoCalMeanCalMags)
    # This is the magnitude with the FGCM (central-ccd) zeropoint
    zptMeanCalMags = np.zeros_like(photoCalMeanCalMags)

    for i, rec in enumerate(src[gdSrc]):
        photoCalMeanCalMags[i] = testCal.instFluxToMagnitude(
            rec['slot_CalibFlux_flux'])
        photoCalMags[i] = testCal.instFluxToMagnitude(
            rec['slot_CalibFlux_flux'], rec.getCentroid())
        zptMeanCalMags[i] = fgcmZpt - 2.5 * np.log10(rec['slot_CalibFlux_flux'])

    # These should be very close, with some tiny differences because the fgcm
    # value is defined at the center of the bbox, and the photoCal is the
    # mean over the box
    self.assertFloatsAlmostEqual(photoCalMeanCalMags, zptMeanCalMags, rtol=1e-6)

    # These should be roughly equal, but not precisely, because of the
    # focal-plane variation. However, this is a useful sanity check for
    # something going totally wrong.
    self.assertFloatsAlmostEqual(photoCalMeanCalMags, photoCalMags, rtol=1e-2)

    # Test the transmission output

    visitCatalog = butler.get('fgcmVisitCatalog')
    lutCat = butler.get('fgcmLookUpTable')

    testTrans = butler.get(
        'transmission_atmosphere_fgcm',
        dataId={visitDataRefName: visitCatalog[0]['visit']})
    testResp = testTrans.sampleAt(position=afwGeom.Point2D(0, 0),
                                  wavelengths=lutCat[0]['atmlambda'])

    # The fit should be roughly consistent with the standard, although the
    # airmass is taken into account even with the "frozen" atmosphere.
    # This is also a rough comparison, because the interpolation does
    # not work well with such a coarse look-up table used for the test.
    self.assertFloatsAlmostEqual(testResp, lutCat[0]['atmstdtrans'], atol=0.06)

    # The second should be close to the first, but there is the airmass
    # difference so they aren't identical
    testTrans2 = butler.get(
        'transmission_atmosphere_fgcm',
        dataId={visitDataRefName: visitCatalog[1]['visit']})
    testResp2 = testTrans2.sampleAt(position=afwGeom.Point2D(0, 0),
                                    wavelengths=lutCat[0]['atmlambda'])
    self.assertFloatsAlmostEqual(testResp, testResp2, atol=1e-4)

def selectMatches(self, matches, sourceKeys, filterName, frame=None):
    """!Select reference/source matches according to the criteria specified
    in the config.

    \param[in] matches ReferenceMatchVector (not modified)
    \param[in] sourceKeys Struct of source catalog keys, as returned by
               getSourceKeys()
    \param[in] filterName name of camera filter; used to obtain the reference
               flux field
    \param[in] frame ds9 frame number to use for debugging display;
               if frame is non-None, display information about trimmed objects
               on that ds9 frame:
               - Bad: red x
               - Unsuitable objects: blue + (and a cyan o if a galaxy)
               - Failed flux cut: magenta *

    \return a \link lsst.afw.table.ReferenceMatchVector\endlink that contains
            only the selected matches. If a schema was passed during task
            construction, a flag field will be set on sources in the selected
            matches.

    \throws ValueError There are no valid matches.
    """
    self.log.logdebug("Number of input matches: %d" % (len(matches)))
    if len(matches) == 0:
        raise ValueError("No input matches")

    # Only use stars for which the flags indicate the photometry is good.
    afterFlagCutInd = [i for i, m in enumerate(matches)
                       if checkSourceFlags(m.second, sourceKeys)]
    afterFlagCut = [matches[i] for i in afterFlagCutInd]
    self.log.logdebug("Number of matches after source flag cuts: %d"
                      % (len(afterFlagCut)))

    if len(afterFlagCut) != len(matches):
        if frame is not None:
            with ds9.Buffering():
                for i, m in enumerate(matches):
                    if i not in afterFlagCutInd:
                        x, y = m.second.getCentroid()
                        ds9.dot("x", x, y, size=4, frame=frame, ctype=ds9.RED)
        matches = afterFlagCut

    if len(matches) == 0:
        raise ValueError("All matches eliminated by source flags")

    refSchema = matches[0].first.schema
    try:
        photometricKey = refSchema.find("photometric").key
        try:
            resolvedKey = refSchema.find("resolved").key
        except:
            resolvedKey = None
        try:
            variableKey = refSchema.find("variable").key
        except:
            variableKey = None
    except:
        self.log.warn("No 'photometric' flag key found in reference schema.")
        photometricKey = None

    if photometricKey is not None:
        afterRefCutInd = [i for i, m in enumerate(matches)
                          if m.first.get(photometricKey)]
        afterRefCut = [matches[i] for i in afterRefCutInd]

        if len(afterRefCut) != len(matches):
            if frame is not None:
                with ds9.Buffering():
                    for i, m in enumerate(matches):
                        if i not in afterRefCutInd:
                            x, y = m.second.getCentroid()
                            ds9.dot("+", x, y, size=4, frame=frame,
                                    ctype=ds9.BLUE)
                            if resolvedKey and m.first.get(resolvedKey):
                                ds9.dot("o", x, y, size=6, frame=frame,
                                        ctype=ds9.CYAN)
                            if variableKey and m.first.get(variableKey):
                                ds9.dot("o", x, y, size=6, frame=frame,
                                        ctype=ds9.MAGENTA)
            matches = afterRefCut

    self.log.logdebug("Number of matches after reference catalog cuts: %d"
                      % (len(matches)))
    if len(matches) == 0:
        raise RuntimeError("No sources remain in match list after reference "
                           "catalog cuts.")

    fluxName = getRefFluxField(refSchema, filterName)
    fluxKey = refSchema.find(fluxName).key
    if self.config.magLimit is not None:
        fluxLimit = fluxFromABMag(self.config.magLimit)
        afterMagCutInd = [i for i, m in enumerate(matches)
                          if (m.first.get(fluxKey) > fluxLimit
                              and m.second.getPsfFlux() > 0.0)]
    else:
        afterMagCutInd = [i for i, m in enumerate(matches)
                          if m.second.getPsfFlux() > 0.0]
    afterMagCut = [matches[i] for i in afterMagCutInd]

    if len(afterMagCut) != len(matches):
        if frame is not None:
            with ds9.Buffering():
                for i, m in enumerate(matches):
                    if i not in afterMagCutInd:
                        x, y = m.second.getCentroid()
                        ds9.dot("*", x, y, size=4, frame=frame,
                                ctype=ds9.MAGENTA)
        matches = afterMagCut

    self.log.logdebug("Number of matches after magnitude limit cuts: %d"
                      % (len(matches)))
    if len(matches) == 0:
        raise RuntimeError("No sources remaining in match list after "
                           "magnitude limit cuts.")

    if frame is not None:
        with ds9.Buffering():
            for m in matches:
                x, y = m.second.getCentroid()
                ds9.dot("o", x, y, size=4, frame=frame, ctype=ds9.GREEN)

    result = afwTable.ReferenceMatchVector()
    for m in matches:
        if self.outputField is not None:
            m.second.set(self.outputField, True)
        result.append(m)
    return result

def selectMatches(self, matches, sourceKeys, filterName, frame=None):
    """!Select reference/source matches according to the criteria specified
    in the config.

    \param[in] matches ReferenceMatchVector (not modified)
    \param[in] sourceKeys Struct of source catalog keys, as returned by
               getSourceKeys()
    \param[in] filterName name of camera filter; used to obtain the reference
               flux field
    \param[in] frame ds9 frame number to use for debugging display;
               if frame is non-None, display information about trimmed objects
               on that ds9 frame:
               - Bad: red x
               - Unsuitable objects: blue + (and a cyan o if a galaxy)
               - Failed flux cut: magenta *

    \return a list containing only the selected matches. If a schema was
            passed during task construction, a flag field will be set on
            sources in the selected matches.

    \throws ValueError There are no valid matches.
    """
    self.log.debug("Number of input matches: %d", len(matches))

    if self.config.doSelectUnresolved:
        # Select only unresolved sources if asked to do so.
        matches = [m for m in matches
                   if self.isUnresolved(m.second, sourceKeys.starGal)]
        self.log.debug("Number of matches after culling resolved sources: %d",
                       len(matches))

    if len(matches) == 0:
        raise ValueError("No input matches")
    for m in matches:
        if self.candidateKey is not None:
            m.second.set(self.candidateKey, True)

    # Only use stars for which the flags indicate the photometry is good.
    afterFlagCutInd = [i for i, m in enumerate(matches)
                       if checkSourceFlags(m.second, sourceKeys)]
    afterFlagCut = [matches[i] for i in afterFlagCutInd]
    self.log.debug("Number of matches after source flag cuts: %d",
                   len(afterFlagCut))

    if len(afterFlagCut) != len(matches):
        if frame is not None:
            with ds9.Buffering():
                for i, m in enumerate(matches):
                    if i not in afterFlagCutInd:
                        x, y = m.second.getCentroid()
                        ds9.dot("x", x, y, size=4, frame=frame, ctype=ds9.RED)
        matches = afterFlagCut

    if len(matches) == 0:
        raise ValueError("All matches eliminated by source flags")

    refSchema = matches[0].first.schema
    try:
        photometricKey = refSchema.find("photometric").key
        try:
            resolvedKey = refSchema.find("resolved").key
        except:
            resolvedKey = None
        try:
            variableKey = refSchema.find("variable").key
        except:
            variableKey = None
    except:
        self.log.warn("No 'photometric' flag key found in reference schema.")
        photometricKey = None

    if photometricKey is not None:
        afterRefCutInd = [i for i, m in enumerate(matches)
                          if m.first.get(photometricKey)]
        afterRefCut = [matches[i] for i in afterRefCutInd]

        if len(afterRefCut) != len(matches):
            if frame is not None:
                with ds9.Buffering():
                    for i, m in enumerate(matches):
                        if i not in afterRefCutInd:
                            x, y = m.second.getCentroid()
                            ds9.dot("+", x, y, size=4, frame=frame,
                                    ctype=ds9.BLUE)
                            if resolvedKey and m.first.get(resolvedKey):
                                ds9.dot("o", x, y, size=6, frame=frame,
                                        ctype=ds9.CYAN)
                            if variableKey and m.first.get(variableKey):
                                ds9.dot("o", x, y, size=6, frame=frame,
                                        ctype=ds9.MAGENTA)
            matches = afterRefCut

    self.log.debug("Number of matches after reference catalog cuts: %d",
                   len(matches))
    if len(matches) == 0:
        raise RuntimeError("No sources remain in match list after reference "
                           "catalog cuts.")

    fluxName = getRefFluxField(refSchema, filterName)
    fluxKey = refSchema.find(fluxName).key
    if self.config.magLimit is not None:
        fluxLimit = fluxFromABMag(self.config.magLimit)
        afterMagCutInd = [i for i, m in enumerate(matches)
                          if (m.first.get(fluxKey) > fluxLimit
                              and m.second.getPsfFlux() > 0.0)]
    else:
        afterMagCutInd = [i for i, m in enumerate(matches)
                          if m.second.getPsfFlux() > 0.0]
    afterMagCut = [matches[i] for i in afterMagCutInd]

    if len(afterMagCut) != len(matches):
        if frame is not None:
            with ds9.Buffering():
                for i, m in enumerate(matches):
                    if i not in afterMagCutInd:
                        x, y = m.second.getCentroid()
                        ds9.dot("*", x, y, size=4, frame=frame,
                                ctype=ds9.MAGENTA)
        matches = afterMagCut

    self.log.debug("Number of matches after magnitude limit cuts: %d",
                   len(matches))
    if len(matches) == 0:
        raise RuntimeError("No sources remaining in match list after "
                           "magnitude limit cuts.")

    if frame is not None:
        with ds9.Buffering():
            for m in matches:
                x, y = m.second.getCentroid()
                ds9.dot("o", x, y, size=4, frame=frame, ctype=ds9.GREEN)

    result = []
    for m in matches:
        if self.usedKey is not None:
            m.second.set(self.usedKey, True)
        result.append(m)
    return result

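# Illustration of the magnitude-limit cut in selectMatches above (not from the
# original source): with reference fluxes in Jy, a magnitude limit converts to a
# *minimum* flux, because brighter AB magnitudes correspond to larger fluxes.
from lsst.afw.image import fluxFromABMag

magLimit = 22.0                      # hypothetical config.magLimit value
fluxLimit = fluxFromABMag(magLimit)  # ~ 3631 * 10**(-0.4 * 22.0) Jy
# Matches brighter than magLimit satisfy m.first.get(fluxKey) > fluxLimit.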