def __init__(self):
    self.plotType = 'SummaryHistogram'
    self.objectPlotter = True
    self.defaultPlotDict = {'title': None, 'xlabel': None, 'ylabel': 'Count', 'label': None,
                            'cumulative': False, 'xMin': None, 'xMax': None,
                            'yMin': None, 'yMax': None, 'color': 'b', 'linestyle': '-',
                            'histStyle': True, 'metricReduce': metrics.SumMetric(), 'bins': None}
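# A short sketch of how these plotter defaults are typically overridden, assuming this __init__
# belongs to the SummaryHistogram plotter in lsst.sims.maf.plots. Any key supplied in a bundle's
# plotDict takes precedence over defaultPlotDict; keys left out fall back to the defaults above.
import lsst.sims.maf.plots as plots
import lsst.sims.maf.metrics as metrics

plotDict = {'title': 'Summarized histogram', 'bins': 50, 'color': 'r',
            'metricReduce': metrics.MeanMetric()}  # reduce the per-pixel vectors with a mean instead of a sum
plotFuncs = [plots.SummaryHistogram()]
# These would then be attached to a MetricBundle, e.g.
# bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict, plotFuncs=plotFuncs)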
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, DDF=True): """A batch of metrics for looking at survey performance relative to the SRD and the main science drivers of LSST. Parameters ---------- """ # Hide dependencies from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc, TDEsAsciiMetric if colmap is None: colmap = ColMapDict('fbs') if extraSql is None: extraSql = '' if extraSql == '': joiner = '' else: joiner = ' and ' bundleList = [] # Get some standard per-filter coloring and sql constraints filterlist, colors, filterorders, filtersqls, filtermetadata = filterList( all=False, extraSql=extraSql, extraMetadata=extraMetadata) standardStats = standardSummary(withCount=False) healslicer = slicers.HealpixSlicer(nside=nside) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # Load up the plastic light curves models = ['SNIa-normal', 'KN'] plasticc_models_dict = {} for model in models: plasticc_models_dict[model] = list( load_plasticc_lc(model=model).values()) ######################### # SRD, DM, etc ######################### fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata, benchmarkArea=benchmarkArea, benchmarkNvisits=benchmarkNvisits) astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata) rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata) # loop through and modify the display dicts - set SRD as group and their previous 'group' as the subgroup temp_list = [] for key in fOb: temp_list.append(fOb[key]) for key in astromb: temp_list.append(astromb[key]) for key in rapidb: temp_list.append(rapidb[key]) for metricb in temp_list: metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace( 'SRD', '').lstrip(' ') metricb.displayDict['group'] = 'SRD' bundleList.extend(temp_list) displayDict = { 'group': 'SRD', 'subgroup': 'Year Coverage', 'order': 0, 'caption': 'Number of years with observations.' } slicer = slicers.HealpixSlicer(nside=nside) metric = metrics.YearCoverageMetric() for f in filterlist: plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]} summary = [ metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean, decreasing=True, metricName='N Seasons (18k) %s' % f) ] bundleList.append( mb.MetricBundle(metric, slicer, filtersqls[f], plotDict=plotDict, metadata=filtermetadata[f], displayDict=displayDict, summaryMetrics=summary)) ######################### # Solar System ######################### # Generally, we need to run Solar System metrics separately; they're a multi-step process. 
######################### # Cosmology ######################### displayDict = { 'group': 'Cosmology', 'subgroup': 'Galaxy Counts', 'order': 0, 'caption': None } plotDict = {'percentileClip': 95., 'nTicks': 5} sql = extraSql + joiner + 'filter="i"' metadata = combineMetadata(extraMetadata, 'i band') metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside) summary = [ metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True, metricName='N Galaxies (18k)') ] summary.append(metrics.SumMetric(metricName='N Galaxies (all)')) # make sure slicer has cache off slicer = slicers.HealpixSlicer(nside=nside, useCache=False) bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict, metadata=metadata, displayDict=displayDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # let's put Type Ia SN in here displayDict['subgroup'] = 'SNe Ia' # XXX-- use the light curves from PLASTICC here displayDict['caption'] = 'Fraction of normal SNe Ia' sql = extraSql slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0) metric = Plasticc_metric(metricName='SNIa') # Set the maskval so that we count missing objects as zero. summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=extraMetadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 displayDict['subgroup'] = 'Camera Rotator' displayDict[ 'caption'] = 'Kuiper statistic (0 is uniform, 1 is delta function) of the ' slicer = slicers.HealpixSlicer(nside=nside) metric1 = metrics.KuiperMetric('rotSkyPos') metric2 = metrics.KuiperMetric('rotTelPos') for f in filterlist: for m in [metric1, metric2]: plotDict = {'color': colors[f]} displayDict['order'] = filterorders[f] displayDict['caption'] += f"{m.colname} for visits in {f} band." bundleList.append( mb.MetricBundle(m, slicer, filtersqls[f], plotDict=plotDict, displayDict=displayDict, summaryMetrics=standardStats, plotFuncs=subsetPlots)) # XXX--need some sort of metric for weak lensing ######################### # Variables and Transients ######################### displayDict = { 'group': 'Variables/Transients', 'subgroup': 'Periodic Stars', 'order': 0, 'caption': None } for period in [ 0.5, 1, 2, ]: for magnitude in [21., 24.]: amplitudes = [0.05, 0.1, 1.0] periods = [period] * len(amplitudes) starMags = [magnitude] * len(amplitudes) plotDict = { 'nTicks': 3, 'colorMin': 0, 'colorMax': 3, 'xMin': 0, 'xMax': 3 } metadata = combineMetadata( 'P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude), extraMetadata) sql = None displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \ 'be detected for an r=%i star. A variety of amplitudes of periodicity ' \ 'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \ 'metric values of [1, 2, or 3]. ' % (period, magnitude) metric = metrics.PeriodicDetectMetric(periods=periods, starMags=starMags, amplitudes=amplitudes, metricName='PeriodDetection') bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, plotFuncs=subsetPlots, summaryMetrics=standardStats) bundleList.append(bundle) displayDict['order'] += 1 # XXX add some PLASTICC metrics for kilovnova and tidal disruption events. 
displayDict['subgroup'] = 'KN' displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)' displayDict['order'] = 0 slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0) metric = Plasticc_metric(metricName='KN') plotFuncs = [plots.HealpixSkyMap()] summary_stats = [metrics.MeanMetric(maskVal=0)] bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=extraMetadata, displayDict=displayDict) bundleList.append(bundle) # Tidal Disruption Events displayDict['subgroup'] = 'TDE' displayDict[ 'caption'] = 'Fraction of TDE lightcurves that could be identified, outside of DD fields' detectSNR = {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5} # light curve parameters epochStart = -22 peakEpoch = 0 nearPeakT = 10 postPeakT = 14 # two weeks nPhaseCheck = 1 # condition parameters nObsTotal = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0} nObsPrePeak = 1 nObsNearPeak = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0} nFiltersNearPeak = 3 nObsPostPeak = 0 nFiltersPostPeak = 2 metric = TDEsAsciiMetric(asciifile=None, detectSNR=detectSNR, epochStart=epochStart, peakEpoch=peakEpoch, nearPeakT=nearPeakT, postPeakT=postPeakT, nPhaseCheck=nPhaseCheck, nObsTotal=nObsTotal, nObsPrePeak=nObsPrePeak, nObsNearPeak=nObsNearPeak, nFiltersNearPeak=nFiltersNearPeak, nObsPostPeak=nObsPostPeak, nFiltersPostPeak=nFiltersPostPeak) slicer = slicers.HealpixSlicer(nside=32) sql = extraSql + joiner + "note not like '%DD%'" md = extraMetadata if md is None: md = " NonDD" else: md += 'NonDD' bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=standardStats, plotFuncs=plotFuncs, metadata=md, displayDict=displayDict) bundleList.append(bundle) # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC. ######################### # Milky Way ######################### displayDict = {'group': 'Milky Way', 'subgroup': ''} displayDict['subgroup'] = 'N stars' slicer = slicers.HealpixSlicer(nside=nside, useCache=False) sum_stats = [metrics.SumMetric(metricName='Total N Stars')] for f in filterlist: displayDict['order'] = filterorders[f] displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \ 'of less than 0.1 mag' % f # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding metric = metrics.NstarsMetric(crowding_error=0.1, filtername='r', seeingCol=colmap['seeingGeom'], m5Col=colmap['fiveSigmaDepth']) plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100} bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName, summaryMetrics=sum_stats, plotFuncs=subsetPlots, plotDict=plotDict, displayDict=displayDict) bundleList.append(bundle) ######################### # DDF ######################### if DDF: # Hide this import to avoid adding a dependency. from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey ddf_surveys = generate_dd_surveys() # Add on the Euclid fields # XXX--to update. Should have a spot where all the DDF locations are stored. 
ddf_surveys.append( Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa')) ddf_surveys.append( Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb')) # For doing a high-res sampling of the DDF for co-adds ddf_radius = 1.8 # Degrees ddf_nside = 512 ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside))) displayDict = {'group': 'DDF depths', 'subgroup': None} for survey in ddf_surveys: displayDict['subgroup'] = survey.survey_name # Crop off the u-band only DDF if survey.survey_name[0:4] != 'DD:u': dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra), np.degrees(survey.dec)) goodhp = np.where(dist_to_ddf <= ddf_radius) slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp], useCamera=False) for f in filterlist: metric = metrics.Coaddm5Metric( metricName=survey.survey_name + ', ' + f) summary = [ metrics.MedianMetric(metricName='Median depth ' + survey.survey_name + ', ' + f) ] plotDict = {'color': colors[f]} sql = filtersqls[f] displayDict['order'] = filterorders[f] displayDict['caption'] = 'Coadded m5 depth in %s band.' % ( f) bundle = mb.MetricBundle(metric, slicer, sql, metadata=filtermetadata[f], displayDict=displayDict, summaryMetrics=summary, plotFuncs=[], plotDict=plotDict) bundleList.append(bundle) displayDict = {'group': 'DDF Transients', 'subgroup': None} for survey in ddf_surveys: displayDict['subgroup'] = survey.survey_name if survey.survey_name[0:4] != 'DD:u': slicer = plasticc_slicer( plcs=plasticc_models_dict['SNIa-normal'], seed=42, ra_cen=survey.ra, dec_cen=survey.dec, radius=np.radians(3.), useCamera=False) metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa') sql = extraSql summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=extraMetadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] = 10 # Set the runName for all bundles and return the bundleDict. for b in bundleList: b.setRunName(runName) bundleDict = mb.makeBundlesDictFromList(bundleList) return bundleDict
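# A minimal usage sketch for the batch above, assuming the standard MAF driver pattern shown in the
# script at the end of this section (OpsimDatabase + MetricBundleGroup + ResultsDb). The database and
# run names are placeholders; substitute the simulation you want to evaluate.
import lsst.sims.maf.db as db
import lsst.sims.maf.metricBundles as mb

opsdb = db.OpsimDatabase('baseline_v1.db')   # hypothetical database file
outDir = 'science_radar'
resultsDb = db.ResultsDb(outDir=outDir)

bdict = scienceRadarBatch(runName='baseline_v1', nside=64, DDF=True)
group = mb.MetricBundleGroup(bdict, opsdb, outDir=outDir, resultsDb=resultsDb)
group.runAll()
group.plotAll(closefigs=True)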
def slewActivities(colmap=None, runName='opsim', totalSlewN=1, sqlConstraint=None):
    """Generate a set of slew statistics focused on finding the contributions to the overall slew time.

    These slew statistics must be run on the SlewActivities table in opsimv4 and opsimv3.
    Note that the type of activities listed are different between v3 and v4.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    totalSlewN : int, opt
        The total number of slews in the simulated survey.
        Used to calculate % of slew activities for each component. Default is 1.
    sqlConstraint : str or None, opt
        SQL constraint to apply to metrics. Note this runs on Slew*State table, so constraints
        should generally be based on slew_slewCount.

    Returns
    -------
    metricBundleDict
    """
    if totalSlewN == 1:
        warnings.warn('TotalSlewN should be set (using 1). Percents from activities may be incorrect.')

    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    # All of these metrics run with a unislicer, on all the slew data.
    slicer = slicers.UniSlicer()

    if 'slewactivities' not in colmap:
        raise ValueError("List of slewactivities not in colmap! Will not create slewActivities bundles.")

    slewTypeDict = colmap['slewactivities']
    displayDict = {'group': 'Slew', 'subgroup': 'Slew Activities', 'order': -1, 'caption': None}

    for slewType in slewTypeDict:
        metadata = combineMetadata(slewType, sqlConstraint)
        tableValue = slewTypeDict[slewType]

        # Metrics for all activities of this type.
        sql = 'activityDelay>0 and activity="%s"' % tableValue
        if sqlConstraint is not None:
            sql = '(%s) and (%s)' % (sql, sqlConstraint)

        # Percent of slews which include this activity.
        metric = metrics.CountRatioMetric(col='activityDelay', normVal=totalSlewN / 100.0,
                                          metricName='ActivePerc')
        displayDict['caption'] = 'Percent of total slews which include %s movement.' % slewType
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sql, displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)

        # Mean time for this activity, in all slews.
        metric = metrics.MeanMetric(col='activityDelay', metricName='Ave T(s)')
        displayDict['caption'] = 'Mean amount of time (in seconds) for %s movements.' % (slewType)
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sql, displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)

        # Maximum time for this activity, in all slews.
        metric = metrics.MaxMetric(col='activityDelay', metricName='Max T(s)')
        displayDict['caption'] = 'Max amount of time (in seconds) for %s movement.' % (slewType)
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sql, displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)

        # Metrics for activities of this type which are in the critical path.
        sql = 'activityDelay>0 and inCriticalPath="True" and activity="%s"' % tableValue
        if sqlConstraint is not None:
            sql = '(%s) and (%s)' % (sql, sqlConstraint)

        # Percent of slews which include this activity in the critical path.
        metric = metrics.CountRatioMetric(col='activityDelay', normVal=totalSlewN / 100.0,
                                          metricName='ActivePerc in crit')
        displayDict['caption'] = 'Percent of total slews which include %s movement, ' \
                                 'and are in the critical path.' % (slewType)
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sql, displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)

        # Mean time for slews which include this activity, in the critical path.
        metric = metrics.MeanMetric(col='activityDelay', metricName='Ave T(s) in crit')
        displayDict['caption'] = 'Mean time (in seconds) for %s movements, ' \
                                 'when in critical path.' % (slewType)
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sql, displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)

        # Total time that this activity was in the critical path.
        metric = metrics.SumMetric(col='activityDelay', metricName='Total T(s) in crit')
        displayDict['caption'] = 'Total time (in seconds) for %s movements, ' \
                                 'when in critical path.' % (slewType)
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sql, displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)

    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
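# A minimal sketch of how the slewActivities batch might be invoked. The database filename is a
# placeholder, and the table name follows the opsim v4 schema assumed by the docstring above;
# adjust for your own output. The key point is that totalSlewN should be counted from the database
# (here via a plain sqlite query) so the 'ActivePerc' metrics are normalized correctly.
import sqlite3

dbFile = 'baseline_v4.db'  # hypothetical opsim v4 output
with sqlite3.connect(dbFile) as conn:
    # One row per slew in SlewHistory (assumed opsim v4 schema).
    totalSlewN = conn.execute('select count(*) from SlewHistory').fetchone()[0]

slew_bdict = slewActivities(colmap=ColMapDict('opsimV4'), runName='baseline_v4',
                            totalSlewN=totalSlewN)
# slew_bdict is a metricBundleDict; run it with a MetricBundleGroup pointed at the
# SlewActivities table of the same database.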
def testSumMetric(self):
    """Test Sum metric."""
    testmetric = metrics.SumMetric('testdata')
    self.assertEqual(testmetric.run(self.dv), self.dv['testdata'].sum())
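# A self-contained sketch of the kind of fixture this test assumes: 'self.dv' as a structured
# numpy array with a 'testdata' column. The class and setUp shown here are illustrative, not the
# actual test harness.
import unittest
import numpy as np
import lsst.sims.maf.metrics as metrics


class TestSumMetricSketch(unittest.TestCase):
    def setUp(self):
        # 100 random values in a structured array, keyed by the column name the metric uses.
        rng = np.random.RandomState(42)
        self.dv = np.array(list(zip(rng.rand(100))), dtype=[('testdata', float)])

    def testSumMetric(self):
        """Test Sum metric."""
        testmetric = metrics.SumMetric('testdata')
        self.assertEqual(testmetric.run(self.dv), self.dv['testdata'].sum())


if __name__ == '__main__':
    unittest.main()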
def scienceRadarBatch(colmap=None, runName='', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, DDF=True): """A batch of metrics for looking at survey performance relative to the SRD and the main science drivers of LSST. Parameters ---------- """ # Hide dependencies from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc if colmap is None: colmap = ColMapDict('opsimV4') if extraSql is None: extraSql = '' if extraSql == '': joiner = '' else: joiner = ' and ' bundleList = [] healslicer = slicers.HealpixSlicer(nside=nside) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # Load up the plastic light curves models = ['SNIa-normal', 'KN'] plasticc_models_dict = {} for model in models: plasticc_models_dict[model] = list( load_plasticc_lc(model=model).values()) ######################### # SRD, DM, etc ######################### sql = extraSql displayDict = { 'group': 'SRD', 'subgroup': 'fO', 'order': 0, 'caption': None } metric = metrics.CountMetric(col=colmap['mjd'], metricName='fO') plotDict = { 'xlabel': 'Number of Visits', 'Asky': benchmarkArea, 'Nvisit': benchmarkNvisits, 'xMin': 0, 'xMax': 1500 } summaryMetrics = [ metrics.fOArea(nside=nside, norm=False, metricName='fOArea', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fOArea(nside=nside, norm=True, metricName='fOArea/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fONv(nside=nside, norm=False, metricName='fONv', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fONv(nside=nside, norm=True, metricName='fONv/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits) ] caption = 'The FO metric evaluates the overall efficiency of observing. ' caption += ( 'foNv: out of %.2f sq degrees, the area receives at least X and a median of Y visits ' '(out of %d, if compared to benchmark). ' % (benchmarkArea, benchmarkNvisits)) caption += ('fOArea: this many sq deg (out of %.2f sq deg if compared ' 'to benchmark) receives at least %d visits. ' % (benchmarkArea, benchmarkNvisits)) displayDict['caption'] = caption bundle = mb.MetricBundle(metric, healslicer, sql, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryMetrics, plotFuncs=[plots.FOPlot()]) bundleList.append(bundle) displayDict['order'] += 1 displayDict = { 'group': 'SRD', 'subgroup': 'Gaps', 'order': 0, 'caption': None } plotDict = {'percentileClip': 95.} for filtername in 'ugrizy': sql = extraSql + joiner + 'filter ="%s"' % filtername metric = metrics.MaxGapMetric() summaryMetrics = [ metrics.PercentileMetric( percentile=95, metricName='95th percentile of Max gap, %s' % filtername) ] bundle = mb.MetricBundle(metric, healslicer, sql, plotFuncs=subsetPlots, summaryMetrics=summaryMetrics, displayDict=displayDict, plotDict=plotDict) bundleList.append(bundle) displayDict['order'] += 1 ######################### # Solar System ######################### # XXX -- may want to do Solar system seperatly # XXX--fraction of NEOs detected (assume some nominal size and albido) # XXX -- fraction of MBAs detected # XXX -- fraction of KBOs detected # XXX--any others? Planet 9s? Comets? Neptune Trojans? 
######################### # Cosmology ######################### displayDict = { 'group': 'Cosmology', 'subgroup': 'galaxy counts', 'order': 0, 'caption': None } plotDict = {'percentileClip': 95.} sql = extraSql + joiner + 'filter="i"' metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside) summary = [ metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True, metricName='N Galaxies (WFD)') ] summary.append(metrics.SumMetric(metricName='N Galaxies (all)')) # make sure slicer has cache off slicer = slicers.HealpixSlicer(nside=nside, useCache=False) bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # let's put Type Ia SN in here displayDict['subgroup'] = 'SNe Ia' metadata = '' # XXX-- use the light curves from PLASTICC here displayDict['Caption'] = 'Fraction of normal SNe Ia' sql = '' slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0) metric = Plasticc_metric(metricName='SNIa') # Set the maskval so that we count missing objects as zero. summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=metadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 # XXX--need some sort of metric for weak lensing and camera rotation. ######################### # Variables and Transients ######################### displayDict = { 'group': 'Variables and Transients', 'subgroup': 'Periodic Stars', 'order': 0, 'caption': None } periods = [0.1, 0.5, 1., 2., 5., 10., 20.] # days plotDict = {} metadata = '' sql = extraSql displayDict[ 'Caption'] = 'Measure of how well a periodic signal can be measured combining amplitude and phase coverage. 1 is perfect, 0 is no way to fit' for period in periods: summary = metrics.PercentileMetric( percentile=10., metricName='10th %%-ile Periodic Quality, Period=%.1f days' % period) metric = metrics.PeriodicQualityMetric( period=period, starMag=20., metricName='Periodic Stars, P=%.1f d' % period) bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, plotFuncs=subsetPlots, summaryMetrics=summary) bundleList.append(bundle) displayDict['order'] += 1 # XXX add some PLASTICC metrics for kilovnova and tidal disruption events. displayDict['subgroup'] = 'KN' displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)' sql = '' slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0) metric = Plasticc_metric(metricName='KN') summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=metadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC. ######################### # Milky Way ######################### # Let's do the proper motion, parallax, and DCR degen of a 20nd mag star rmag = 20. 
displayDict = { 'group': 'Milky Way', 'subgroup': 'Astrometry', 'order': 0, 'caption': None } sql = extraSql metadata = '' plotDict = {'percentileClip': 95.} metric = metrics.ParallaxMetric(metricName='Parallax Error r=%.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=False) summary = [ metrics.AreaSummaryMetric(area=18000, reduce_func=np.median, decreasing=False, metricName='Median Parallax Error (WFD)') ] summary.append( metrics.PercentileMetric(percentile=95, metricName='95th Percentile Parallax Error')) bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, plotFuncs=subsetPlots, summaryMetrics=summary) bundleList.append(bundle) displayDict['order'] += 1 metric = metrics.ProperMotionMetric( metricName='Proper Motion Error r=%.1f' % rmag, rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=False) summary = [ metrics.AreaSummaryMetric( area=18000, reduce_func=np.median, decreasing=False, metricName='Median Proper Motion Error (WFD)') ] summary.append( metrics.PercentileMetric( metricName='95th Percentile Proper Motion Error')) bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 metric = metrics.ParallaxDcrDegenMetric( metricName='Parallax-DCR degeneracy r=%.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingEff'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth']) caption = 'Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % ( rmag) caption += ' (0 is good, near -1 or 1 is bad).' # XXX--not sure what kind of summary to do here summary = [metrics.MeanMetric(metricName='Mean DCR Degeneracy')] bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for b in bundleList: b.setRunName(runName) ######################### # DDF ######################### ddf_time_bundleDicts = [] if DDF: # Hide this import to avoid adding a dependency. 
from lsst.sims.featureScheduler.surveys import generate_dd_surveys ddf_surveys = generate_dd_surveys() # For doing a high-res sampling of the DDF for co-adds ddf_radius = 1.8 # Degrees ddf_nside = 512 ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside))) displayDict = { 'group': 'DDF depths', 'subgroup': None, 'order': 0, 'caption': None } # Run the inter and intra gaps at the center of the DDFs for survey in ddf_surveys: slicer = slicers.UserPointsSlicer(ra=np.degrees(survey.ra), dec=np.degrees(survey.dec), useCamera=False) ddf_time_bundleDicts.append( interNight(colmap=colmap, slicer=slicer, runName=runName, nside=64, extraSql='note="%s"' % survey.survey_name, subgroup=survey.survey_name)[0]) ddf_time_bundleDicts.append( intraNight(colmap=colmap, slicer=slicer, runName=runName, nside=64, extraSql='note="%s"' % survey.survey_name, subgroup=survey.survey_name)[0]) for survey in ddf_surveys: displayDict['subgroup'] = survey.survey_name # Crop off the u-band only DDF if survey.survey_name[0:4] != 'DD:u': dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra), np.degrees(survey.dec)) goodhp = np.where(dist_to_ddf <= ddf_radius) slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp], useCamera=False) for filtername in ['u', 'g', 'r', 'i', 'z', 'y']: metric = metrics.Coaddm5Metric( metricName=survey.survey_name + ', ' + filtername) summary = [ metrics.MedianMetric(metricName='median depth ' + survey.survey_name + ', ' + filtername) ] sql = extraSql + joiner + 'filter = "%s"' % filtername bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, displayDict=displayDict, summaryMetrics=summary, plotFuncs=[]) bundleList.append(bundle) displayDict['order'] += 1 displayDict = { 'group': 'DDF Transients', 'subgroup': None, 'order': 0, 'caption': None } for survey in ddf_surveys: displayDict['subgroup'] = survey.survey_name if survey.survey_name[0:4] != 'DD:u': slicer = plasticc_slicer( plcs=plasticc_models_dict['SNIa-normal'], seed=42, ra_cen=survey.ra, dec_cen=survey.dec, radius=np.radians(3.), useCamera=False) metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa') sql = '' summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=metadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 for b in bundleList: b.setRunName(runName) bundleDict = mb.makeBundlesDictFromList(bundleList) intraDict = intraNight(colmap=colmap, runName=runName, nside=nside, extraSql=extraSql, extraMetadata=extraMetadata)[0] interDict = interNight(colmap=colmap, runName=runName, nside=nside, extraSql=extraSql, extraMetadata=extraMetadata)[0] bundleDict.update(intraDict) bundleDict.update(interDict) for ddf_time in ddf_time_bundleDicts: bundleDict.update(ddf_time) return bundleDict
def descWFDBatch(colmap=None, runName='opsim', nside=64, bandpass='i',
                 nfilters_needed=6, lim_ebv=0.2,
                 mag_cuts={1: 24.75 - 0.1, 3: 25.35 - 0.1, 6: 25.72 - 0.1, 10: 26.0 - 0.1}):
    # Hide some dependencies .. we should probably bring these into MAF
    from mafContrib.lssmetrics.depthLimitedNumGalMetric import DepthLimitedNumGalMetric
    from mafContrib import (Plasticc_metric, plasticc_slicer, load_plasticc_lc)
    # The options to add additional sql constraints are removed for now.
    if colmap is None:
        colmap = ColMapDict('fbs')

    # Calculate a subset of DESC WFD-related metrics.
    displayDict = {'group': 'Cosmology'}
    subgroupCount = 1

    standardStats = standardSummary(withCount=False)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    if not isinstance(mag_cuts, dict):
        if isinstance(mag_cuts, float) or isinstance(mag_cuts, int):
            mag_cuts = {10: mag_cuts}
        else:
            raise TypeError('mag_cuts should be a float or a dict of {year: mag_cut}.')
    yrs = list(mag_cuts.keys())
    maxYr = max(yrs)

    # Load up the plasticc light curves
    models = ['SNIa-normal']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    # One of the primary concerns for DESC WFD metrics is to add dust extinction and coadded depth limits
    # as well as to get some coverage in all 6 bandpasses.
    # These cuts figure into many of the general metrics.

    displayDict['subgroup'] = f'{subgroupCount}: Static Science'
    ## Static Science
    # Calculate the static science metrics - effective survey area, mean/median coadded depth, stdev of
    # coadded depth and the 3x2ptFoM emulator.
    dustmap = maps.DustMap(nside=nside, interp=False)
    pix_area = hp.nside2pixarea(nside, degrees=True)
    summaryMetrics = [metrics.MeanMetric(), metrics.MedianMetric(), metrics.RmsMetric(),
                      metrics.CountRatioMetric(normVal=1 / pix_area, metricName='Effective Area (deg)')]
    bundleList = []
    displayDict['order'] = 0
    for yr_cut in yrs:
        ptsrc_lim_mag_i_band = mag_cuts[yr_cut]
        sqlconstraint = 'night <= %s' % (yr_cut * 365.25)
        sqlconstraint += ' and note not like "DD%"'
        metadata = f'{bandpass} band non-DD year {yr_cut}'
        ThreebyTwoSummary = metrics.StaticProbesFoMEmulatorMetricSimple(nside=nside, year=yr_cut,
                                                                        metricName='3x2ptFoM')
        m = metrics.ExgalM5_with_cuts(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                      lsstFilter=bandpass, nFilters=nfilters_needed,
                                      extinction_cut=lim_ebv, depth_cut=ptsrc_lim_mag_i_band)
        s = slicers.HealpixSlicer(nside=nside, useCache=False)
        caption = 'Cosmology/Static Science metrics are based on evaluating the region of '
        caption += f'the sky that meets the requirements (in year {yr_cut}) of coverage in '
        caption += f'all {nfilters_needed} filters, an E(B-V) value lower than {lim_ebv}, and at '
        caption += f'least a coadded depth of {ptsrc_lim_mag_i_band} in {bandpass}. '
        caption += 'From there the effective survey area, coadded depth, standard deviation of the depth, '
        caption += 'and a 3x2pt static science figure of merit emulator are calculated using the '
        caption += 'dust-extincted coadded depth map (over that reduced footprint).'
        displayDict['caption'] = caption
        bundle = mb.MetricBundle(m, s, sqlconstraint, mapsList=[dustmap],
                                 metadata=metadata, summaryMetrics=summaryMetrics + [ThreebyTwoSummary],
                                 displayDict=displayDict)
        displayDict['order'] += 1
        bundleList.append(bundle)

    ## LSS Science
    # The only metric we have from LSS is the NGals metric - which is similar to the GalaxyCountsExtended
    # metric, but evaluated only on the depth/dust cuts footprint.
    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: LSS'
    displayDict['order'] = 0
    plotDict = {'nTicks': 5}
    # Have to include all filters in query, so that we check for all-band coverage.
    # Galaxy numbers calculated using 'bandpass' images only though.
    sqlconstraint = 'note not like "DD%"'
    metadata = f'{bandpass} band galaxies non-DD'
    metric = DepthLimitedNumGalMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                      nside=nside, filterBand=bandpass, redshiftBin='all',
                                      nfilters_needed=nfilters_needed,
                                      lim_mag_i_ptsrc=mag_cuts[maxYr], lim_ebv=lim_ebv)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                             metadata=metadata, mapsList=[dustmap],
                             displayDict=displayDict, summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)

    ## WL metrics
    # Calculates the number of visits per pointing, after removing parts of the footprint due to dust/depth
    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: WL'
    displayDict['order'] = 0
    sqlconstraint = f'note not like "DD%" and filter = "{bandpass}"'
    metadata = f'{bandpass} band non-DD'
    minExpTime = 15
    m = metrics.WeakLensingNvisits(m5Col=colmap['fiveSigmaDepth'], expTimeCol=colmap['exptime'],
                                   lsstFilter=bandpass, depthlim=mag_cuts[maxYr],
                                   ebvlim=lim_ebv, min_expTime=minExpTime)
    s = slicers.HealpixSlicer(nside=nside, useCache=False)
    displayDict['caption'] = 'The number of visits per pointing, over the same reduced footprint as '
    displayDict['caption'] += f'described above. A cutoff of {minExpTime} seconds removes very short visits.'
    displayDict['order'] = 1
    bundle = mb.MetricBundle(m, s, sqlconstraint, mapsList=[dustmap],
                             metadata=metadata, summaryMetrics=standardStats,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # This probably will get replaced by @pgris's SN metrics?
    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: SNe Ia'
    displayDict['order'] = 0
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia (using PLASTICC light curves)'
    sqlconstraint = 'note not like "DD%"'
    metadata = 'non-DD'
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint, metadata=metadata,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             displayDict=displayDict)
    bundleList.append(bundle)

    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: Camera Rotator'
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(all=False,
                                                                              extraSql=None,
                                                                              extraMetadata=None)
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            # Rebuild the caption for each bundle, rather than appending to it repeatedly.
            displayDict['caption'] = 'Kuiper statistic (0 is uniform, 1 is delta function) of the ' \
                                     f'{m.colname} for visits in {f} band.'
            bundleList.append(mb.MetricBundle(m, slicer, filtersqls[f], plotDict=plotDict,
                                              displayDict=displayDict, summaryMetrics=standardStats,
                                              plotFuncs=subsetPlots))

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
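# The mag_cuts argument is flexible: a bare float is treated as a year-10 i-band point-source depth
# cut (promoted to {10: value} by the isinstance check above), while a dict gives per-year cuts.
# Run names here are placeholders.
bdict_y10 = descWFDBatch(runName='baseline_v1', nside=64, mag_cuts=25.9)   # becomes {10: 25.9}
bdict_multi = descWFDBatch(runName='baseline_v1', nside=64,
                           mag_cuts={1: 24.65, 3: 25.25, 6: 25.62, 10: 25.9})
# Either returned bundle dict can be run with a MetricBundleGroup, as in the earlier sketch.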
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use FBS column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str or None, opt
        Additional SQL constraint to apply to all metrics.
    extraMetadata : str or None, opt
        Additional metadata to apply to all results.
    nside : int, opt
        Nside value for the healpix slicers. Default 64.
    benchmarkArea : float, opt
        Area of sky (sq deg) to use for the fO benchmark. Default 18000.
    benchmarkNvisits : int, opt
        Number of visits to use for the fO benchmark. Default 825.
    DDF : bool, opt
        If True, include the deep drilling field metrics. Default True.
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import (Plasticc_metric, plasticc_slicer, load_plasticc_lc,
                            TdePopMetric, generateTdePopSlicer,
                            generateMicrolensingSlicer, MicrolensingMetric)

    if colmap is None:
        colmap = ColMapDict('fbs')

    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []
    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(all=False,
                                                                              extraSql=extraSql,
                                                                              extraMetadata=extraMetadata)
    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plasticc light curves - SNIa-normal are loaded in descWFDBatch
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata,
                  benchmarkArea=benchmarkArea, benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace('SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    displayDict = {'group': 'SRD', 'subgroup': 'Year Coverage', 'order': 0,
                   'caption': 'Number of years with observations.'}
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean, decreasing=True,
                                             metricName='N Seasons (18k) %s' % f)]
        bundleList.append(mb.MetricBundle(metric, slicer, filtersqls[f],
                                          plotDict=plotDict, metadata=filtermetadata[f],
                                          displayDict=displayDict, summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Galaxies
    #########################
    displayDict = {'group': 'Galaxies', 'subgroup': 'Galaxy Counts', 'order': 0, 'caption': None}
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    displayDict['caption'] = 'Number of galaxies across the sky, in i band. Generally, full survey footprint.'
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             metadata=metadata, displayDict=displayDict,
                             summaryMetrics=summary, plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    #########################
    # Cosmology
    #########################
    # note the desc batch does not currently take the extraSql or extraMetadata arguments.
    descBundleDict = descWFDBatch(colmap=colmap, runName=runName, nside=nside)
    for d in descBundleDict:
        bundleList.append(descBundleDict[d])

    #########################
    # Variables and Transients
    #########################
    displayDict = {'group': 'Variables/Transients', 'subgroup': 'Periodic Stars',
                   'order': 0, 'caption': None}
    for period in [0.5, 1, 2]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)

            plotDict = {'nTicks': 3, 'colorMin': 0, 'colorMax': 3, 'xMin': 0, 'xMax': 3}
            metadata = combineMetadata('P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                                       extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods, starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                                     displayDict=displayDict, plotDict=plotDict,
                                     plotFuncs=subsetPlots, summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilonova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=extraMetadata, displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'TDE lightcurves that could be identified'
    metric = TdePopMetric()
    slicer = generateTdePopSlicer()
    sql = ''
    plotDict = {'reduceFunc': np.sum, 'nside': 128}
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             plotDict=plotDict, plotFuncs=plotFuncs,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Microlensing events
    displayDict['subgroup'] = 'Microlensing'
    displayDict['caption'] = 'Fast microlensing events'
    plotDict = {'nside': 128}
    sql = ''
    slicer = generateMicrolensingSlicer(min_crossing_time=1, max_crossing_time=10)
    metric = MicrolensingMetric(metricName='Fast Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()], metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    displayDict['caption'] = 'Slow microlensing events'
    slicer = generateMicrolensingSlicer(min_crossing_time=100, max_crossing_time=1500)
    metric = MicrolensingMetric(metricName='Slow Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()], metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    #########################
    # Milky Way
    #########################
    displayDict = {'group': 'Milky Way', 'subgroup': ''}
    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with a measurement error due to crowding ' \
                                 'of less than 0.2 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=False,
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'], maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots,
                                 plotDict=plotDict, displayDict=displayDict,
                                 mapsList=[stellar_map])
        bundleList.append(bundle)

    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, no crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with a measurement error ' \
                                 'of less than 0.2 mag, not considering crowding' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=True,
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'],
                                      metricName='Nstars_no_crowding', maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots,
                                 plotDict=plotDict, displayDict=displayDict,
                                 mapsList=[stellar_map])
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra), np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp], useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(metricName=survey.survey_name + ', ' + f)
                    summary = [metrics.MedianMetric(metricName='Median depth ' +
                                                    survey.survey_name + ', ' + f)]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric, slicer, sql, metadata=filtermetadata[f],
                                             displayDict=displayDict, summaryMetrics=summary,
                                             plotFuncs=[], plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                                         ra_cen=survey.ra, dec_cen=survey.dec,
                                         radius=np.radians(3.), useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                                         metadata=extraMetadata, displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)
    return bundleDict
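# A hedged sketch of restricting this batch to the non-DDF (WFD-like) footprint via extraSql /
# extraMetadata, following the 'note not like "DD%"' convention used elsewhere in these batches.
# The run name is a placeholder.
wfd_bdict = scienceRadarBatch(runName='baseline_v1', nside=64,
                              extraSql='note not like "DD%"', extraMetadata='non-DD',
                              DDF=False)
# wfd_bdict can then be handed to a MetricBundleGroup exactly as in the earlier usage sketch.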
import matplotlib.pyplot as plt
# %matplotlib inline
import lsst.sims.maf.db as db
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as metricBundles
from lsst.sims.maf.plots import PlotHandler
from SpacialOverlapMetric import SpacialOverlapMetric

database = 'astro-lsst-01_2022.db'
databasename = 'astro-lsst-01_2022'
opsdb = db.OpsimDatabase(database)
OutDir = 'Overlap_OGLE'

# Pick the region to evaluate; only the last assignment is used.
# region_name = 'stripe82'
region_name = 'OGLE_disk'
spacialoverlapmetric = SpacialOverlapMetric(region_name=region_name)

# Use a healpix slicer (the OpsimFieldSlicer alternative is left commented out).
# slicer = slicers.OpsimFieldSlicer()
slicer = slicers.HealpixSlicer(nside=64)

Title = 'Spacial Overlap Metric for ' + databasename + '\n' + region_name
plotDict = {'title': Title}
sqlconstraint = ''
SOmetric = metricBundles.MetricBundle(spacialoverlapmetric, slicer, sqlconstraint,
                                      plotDict=plotDict, runName=databasename)

# Sum the overlap metric values over the sky as a single summary statistic.
summaryMetrics = [metrics.SumMetric()]
SOmetric.setSummaryMetrics(summaryMetrics)

bundleDict = {'spacial': SOmetric}
resultsDb = db.ResultsDb(outDir=OutDir)
group = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=OutDir, resultsDb=resultsDb)
group.runAll()
group.plotAll(closefigs=False)

print("Summary", SOmetric.summaryValues)