def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict, optional
        Column-name map for the opsim database. Defaults to ColMapDict('fbs').
    runName : str, optional
        Name of the opsim run; applied to every returned bundle.
    extraSql : str, optional
        Extra SQL constraint applied to all metrics.
    extraMetadata : str, optional
        Extra metadata label combined into each bundle's metadata.
    nside : int, optional
        Healpix nside for the sky slicers.
    benchmarkArea : float, optional
        Benchmark sky area (sq deg), passed through to the fO batch.
    benchmarkNvisits : int, optional
        Benchmark number of visits, passed through to the fO batch.
    DDF : bool, optional
        If True, also generate the deep-drilling-field metric bundles.

    Returns
    -------
    dict of mb.MetricBundle
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc, TDEsAsciiMetric

    if colmap is None:
        colmap = ColMapDict('fbs')

    if extraSql is None:
        extraSql = ''
    # 'joiner' lets per-section constraints be and-ed onto extraSql safely.
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []

    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(
        all=False, extraSql=extraSql, extraMetadata=extraMetadata)

    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                  extraMetadata=extraMetadata, benchmarkArea=benchmarkArea,
                  benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their
    # previous 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace('SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    displayDict = {'group': 'SRD', 'subgroup': 'Year Coverage', 'order': 0,
                   'caption': 'Number of years with observations.'}
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean, decreasing=True,
                                             metricName='N Seasons (18k) %s' % f)]
        bundleList.append(mb.MetricBundle(metric, slicer, filtersqls[f],
                                          plotDict=plotDict, metadata=filtermetadata[f],
                                          displayDict=displayDict, summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Cosmology
    #########################
    displayDict = {'group': 'Cosmology', 'subgroup': 'Galaxy Counts', 'order': 0, 'caption': None}
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             metadata=metadata, displayDict=displayDict,
                             summaryMetrics=summary, plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = extraSql
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=extraMetadata, displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict['subgroup'] = 'Camera Rotator'
    # BUG FIX: the caption was previously extended with '+=' on the shared displayDict,
    # so every successive bundle's caption accumulated all earlier filter/metric text.
    # Build each caption fresh from a fixed prefix instead.
    caption_prefix = 'Kuiper statistic (0 is uniform, 1 is delta function) of the '
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            displayDict['caption'] = caption_prefix + f"{m.colname} for visits in {f} band."
            bundleList.append(mb.MetricBundle(m, slicer, filtersqls[f], plotDict=plotDict,
                                              displayDict=displayDict,
                                              summaryMetrics=standardStats,
                                              plotFuncs=subsetPlots))

    # XXX--need some sort of metric for weak lensing

    #########################
    # Variables and Transients
    #########################
    displayDict = {'group': 'Variables/Transients', 'subgroup': 'Periodic Stars',
                   'order': 0, 'caption': None}
    for period in [0.5, 1, 2, ]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)
            plotDict = {'nTicks': 3, 'colorMin': 0, 'colorMax': 3, 'xMin': 0, 'xMax': 3}
            metadata = combineMetadata('P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                                       extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods, starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                                     displayDict=displayDict, plotDict=plotDict,
                                     plotFuncs=subsetPlots, summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilovnova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=extraMetadata, displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'Fraction of TDE lightcurves that could be identified, outside of DD fields'
    detectSNR = {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5}

    # light curve parameters
    epochStart = -22
    peakEpoch = 0
    nearPeakT = 10
    postPeakT = 14  # two weeks
    nPhaseCheck = 1

    # condition parameters
    nObsTotal = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nObsPrePeak = 1
    nObsNearPeak = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nFiltersNearPeak = 3
    nObsPostPeak = 0
    nFiltersPostPeak = 2

    metric = TDEsAsciiMetric(asciifile=None, detectSNR=detectSNR,
                             epochStart=epochStart, peakEpoch=peakEpoch,
                             nearPeakT=nearPeakT, postPeakT=postPeakT,
                             nPhaseCheck=nPhaseCheck, nObsTotal=nObsTotal,
                             nObsPrePeak=nObsPrePeak, nObsNearPeak=nObsNearPeak,
                             nFiltersNearPeak=nFiltersNearPeak, nObsPostPeak=nObsPostPeak,
                             nFiltersPostPeak=nFiltersPostPeak)
    slicer = slicers.HealpixSlicer(nside=32)
    sql = extraSql + joiner + "note not like '%DD%'"
    md = extraMetadata
    # NOTE(review): the two branches are inconsistent about the space separator
    # (" NonDD" vs 'NonDD' appended with no space) -- preserved as-is.
    if md is None:
        md = " NonDD"
    else:
        md += 'NonDD'
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=standardStats, plotFuncs=plotFuncs,
                             metadata=md, displayDict=displayDict)
    bundleList.append(bundle)

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################
    displayDict = {'group': 'Milky Way', 'subgroup': ''}
    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars')]
    for f in filterlist:
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \
                                 'of less than 0.1 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which
        # to evaluate crowding.
        # BUG FIX: filtername was previously hard-coded to 'r', so all six per-filter
        # bundles evaluated crowding in r band even though the sql selects f-band
        # visits and the caption claims band f. Use the loop filter instead.
        metric = metrics.NstarsMetric(crowding_error=0.1, filtername=f,
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots,
                                 plotDict=plotDict, displayDict=displayDict)
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp],
                                                  useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(metricName=survey.survey_name + ', ' + f)
                    summary = [metrics.MedianMetric(metricName='Median depth ' +
                                                    survey.survey_name + ', ' + f)]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric, slicer, sql, metadata=filtermetadata[f],
                                             displayDict=displayDict, summaryMetrics=summary,
                                             plotFuncs=[], plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                                         ra_cen=survey.ra, dec_cen=survey.dec,
                                         radius=np.radians(3.), useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                                         metadata=extraMetadata, displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)
    return bundleDict
def generate_dd_surveys(nside=None, nexp=2, detailers=None, reward_value=100):
    """Utility to return a list of standard deep drilling field surveys.

    XXX-Someone double check that I got the coordinates right!

    Parameters
    ----------
    nside : int, optional
        Healpix nside passed through to each Deep_drilling_survey.
    nexp : int, optional
        Number of exposures per visit, passed through to each survey.
    detailers : list, optional
        Detailers passed through to each Deep_drilling_survey.
    reward_value : float, optional
        Reward value passed through to each Deep_drilling_survey.

    Returns
    -------
    list of Deep_drilling_survey
    """

    def _append_field(surveys, RA, dec, survey_name, ha_limits, sequence, nvis):
        # Append one field's multi-band survey plus its matching u-band survey.
        # The u-band survey reuses the same pointing and HA limits under the name
        # 'DD:u,<field>' with a fixed 8-visit sequence.
        bfs = dd_bfs(RA, dec, survey_name, ha_limits)
        surveys.append(
            Deep_drilling_survey(bfs, RA, dec, sequence=sequence, nvis=nvis,
                                 survey_name=survey_name, reward_value=reward_value,
                                 nside=nside, nexp=nexp, detailers=detailers))
        u_name = 'DD:u,' + survey_name[3:]
        u_bfs = dd_u_bfs(RA, dec, u_name, ha_limits)
        surveys.append(
            Deep_drilling_survey(u_bfs, RA, dec, sequence='u', nvis=[8],
                                 survey_name=u_name, reward_value=reward_value,
                                 nside=nside, nexp=nexp, detailers=detailers))

    surveys = []

    # ELAIS S1
    _append_field(surveys, 9.45, -44., 'DD:ELAISS1', ([0., 1.5], [21.5, 24.]),
                  'rgizy', [20, 10, 20, 26, 20])

    # XMM-LSS
    _append_field(surveys, 35.708333, -4 - 45 / 60., 'DD:XMM-LSS', ([0., 1.5], [21.5, 24.]),
                  'rgizy', [20, 10, 20, 26, 20])

    # Extended Chandra Deep Field South
    _append_field(surveys, 53.125, -28. - 6 / 60., 'DD:ECDFS', [[0.5, 3.0], [20., 22.5]],
                  'rgizy', [20, 10, 20, 26, 20])

    # COSMOS
    _append_field(surveys, 150.1, 2. + 10. / 60. + 55 / 3600., 'DD:COSMOS',
                  ([0., 2.5], [21.5, 24.]), 'rgizy', [20, 10, 20, 26, 20])

    # Euclid Fields
    _append_field(surveys, 58.97, -49.28, 'DD:EDFS1', ([0., 1.5], [21.5, 24.]),
                  'rgizy', [5, 7, 19, 24, 5])
    _append_field(surveys, 63.6, -47.60, 'DD:EDFS2', ([0., 1.5], [21.5, 24.]),
                  'rgizy', [5, 7, 19, 24, 5])

    return surveys
def generate_dd_surveys(nside=None, nexp=2, detailers=None):
    """Utility to return a list of standard deep drilling field surveys.

    XXX-Someone double check that I got the coordinates right!

    Parameters
    ----------
    nside : int, optional
        Healpix nside passed through to each Deep_drilling_survey.
    nexp : int, optional
        Number of exposures per visit, passed through to each survey.
    detailers : list, optional
        Detailers passed through to each Deep_drilling_survey.

    Returns
    -------
    list of Deep_drilling_survey
    """

    def _append_field(surveys, RA, dec, survey_name, ha_limits):
        # Append one field's grizy survey plus its matching u-band survey.
        # All fields share the same grizy visit pattern and reward value; the
        # u-band survey reuses the pointing/HA limits under 'DD:u,<field>'.
        bfs = dd_bfs(RA, dec, survey_name, ha_limits)
        surveys.append(
            Deep_drilling_survey(bfs, RA, dec, sequence='grizy', nvis=[1, 1, 3, 5, 4],
                                 survey_name=survey_name, reward_value=100,
                                 nside=nside, nexp=nexp, detailers=detailers))
        u_name = 'DD:u,' + survey_name[3:]
        u_bfs = dd_u_bfs(RA, dec, u_name, ha_limits)
        surveys.append(
            Deep_drilling_survey(u_bfs, RA, dec, sequence='u', nvis=[8],
                                 survey_name=u_name, reward_value=100,
                                 nside=nside, nexp=nexp, detailers=detailers))

    surveys = []

    # ELAIS S1
    _append_field(surveys, 9.45, -44., 'DD:ELAISS1', ([0., 1.5], [21.5, 24.]))

    # XMM-LSS
    _append_field(surveys, 35.708333, -4 - 45 / 60., 'DD:XMM-LSS', ([0., 1.5], [21.5, 24.]))

    # Extended Chandra Deep Field South
    _append_field(surveys, 53.125, -28. - 6 / 60., 'DD:ECDFS', [[0.5, 3.0], [20., 22.5]])

    # COSMOS
    _append_field(surveys, 150.1, 2. + 10. / 60. + 55 / 3600., 'DD:COSMOS',
                  ([0., 2.5], [21.5, 24.]))

    # Extra DD Field, just to get to 5. Still not closed on this one
    _append_field(surveys, 349.386443, -63.321004, 'DD:290', ([0., 1.5], [21.5, 24.]))

    return surveys
def generate_dd_surveys(nside=None, nexp=2, detailers=None):
    """Utility to return a list of standard deep drilling field surveys.

    XXX-Someone double check that I got the coordinates right!

    Parameters
    ----------
    nside : int, optional
        Healpix nside passed through to each Deep_drilling_survey.
    nexp : int, optional
        Number of exposures per visit; also stamped onto each Euclid observation.
    detailers : list, optional
        Detailers passed through to each Deep_drilling_survey.

    Returns
    -------
    list of Deep_drilling_survey
    """

    def _append_field(surveys, RA, dec, survey_name, ha_limits):
        # Append one field's grizy survey plus its matching u-band survey.
        bfs = dd_bfs(RA, dec, survey_name, ha_limits)
        surveys.append(
            Deep_drilling_survey(bfs, RA, dec, sequence='grizy', nvis=[1, 1, 3, 5, 4],
                                 survey_name=survey_name, reward_value=100,
                                 nside=nside, nexp=nexp, detailers=detailers))
        u_name = 'DD:u,' + survey_name[3:]
        u_bfs = dd_u_bfs(RA, dec, u_name, ha_limits)
        surveys.append(
            Deep_drilling_survey(u_bfs, RA, dec, sequence='u', nvis=[8],
                                 survey_name=u_name, reward_value=100,
                                 nside=nside, nexp=nexp, detailers=detailers))

    def _edfs_sequence(filters, nviss, survey_name, exptime=30):
        # Build the explicit observation list that interleaves the two Euclid
        # pointings within a single sequence (positions in radians, since these
        # are raw observation objects).
        sequence = []
        for filtername, nvis in zip(filters, nviss):
            for ra, dec in zip(RAs, decs):
                for num in range(nvis):
                    obs = empty_observation()
                    obs['filter'] = filtername
                    obs['exptime'] = exptime
                    obs['RA'] = ra
                    obs['dec'] = dec
                    obs['nexp'] = nexp
                    obs['note'] = survey_name
                    sequence.append(obs)
        return sequence

    surveys = []

    # ELAIS S1
    _append_field(surveys, 9.45, -44., 'DD:ELAISS1', ([0., 1.5], [21.5, 24.]))

    # XMM-LSS
    _append_field(surveys, 35.708333, -4 - 45 / 60., 'DD:XMM-LSS', ([0., 1.5], [21.5, 24.]))

    # Extended Chandra Deep Field South
    _append_field(surveys, 53.125, -28. - 6 / 60., 'DD:ECDFS', [[0.5, 3.0], [20., 22.5]])

    # COSMOS
    _append_field(surveys, 150.1, 2. + 10. / 60. + 55 / 3600., 'DD:COSMOS',
                  ([0., 2.5], [21.5, 24.]))

    # Euclid Fields
    # I can use the sequence kwarg to do two positions per sequence
    # Note the sequences need to be in radians since they are using observation objects directly
    RAs = np.radians([58.97, 63.6])
    decs = np.radians([-49.28, -47.60])
    # And back to degrees for the basis functions and the survey's nominal pointing.
    edfs_RA = np.degrees(RAs[0])
    edfs_dec = np.degrees(decs[0])
    ha_limits = ([0., 1.5], [22.5, 24.])

    survey_name = 'DD:EDFS'
    sequence = _edfs_sequence('grizy', [1, 1, 3, 5, 4], survey_name)
    bfs = dd_bfs(edfs_RA, edfs_dec, survey_name, ha_limits)
    # BUG FIX: the survey pointing previously reused stale loop variables (the COSMOS
    # RA in degrees and the last Euclid dec in *radians*), placing the survey at the
    # wrong coordinates. Use the first Euclid pointing in degrees, consistent with
    # the basis functions built just above.
    surveys.append(
        Deep_drilling_survey(bfs, edfs_RA, edfs_dec, sequence=sequence,
                             survey_name=survey_name, reward_value=100,
                             nside=nside, nexp=nexp, detailers=detailers))

    survey_name = 'DD:u, EDFS'
    sequence = _edfs_sequence('u', [4], survey_name)
    bfs = dd_u_bfs(edfs_RA, edfs_dec, survey_name, ha_limits)
    surveys.append(
        Deep_drilling_survey(bfs, edfs_RA, edfs_dec, sequence=sequence,
                             survey_name=survey_name, reward_value=100,
                             nside=nside, nexp=nexp, detailers=detailers))

    return surveys
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict, optional
        Column-name map for the opsim database. Defaults to ColMapDict('fbs').
    runName : str, optional
        Name of the opsim run; applied to every returned bundle.
    extraSql : str, optional
        Extra SQL constraint applied to the metrics.
    extraMetadata : str, optional
        Extra metadata label combined into the bundle metadata.
    nside : int, optional
        Healpix nside for the sky slicers.
    benchmarkArea : float, optional
        Benchmark sky area (sq deg), passed through to the fO batch.
    benchmarkNvisits : int, optional
        Benchmark number of visits, passed through to the fO batch.
    DDF : bool, optional
        If True, also generate the deep-drilling-field metric bundles.

    Returns
    -------
    dict of mb.MetricBundle
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import (Plasticc_metric, plasticc_slicer, load_plasticc_lc,
                            TdePopMetric, generateTdePopSlicer,
                            generateMicrolensingSlicer, MicrolensingMetric)

    if colmap is None:
        colmap = ColMapDict('fbs')

    if extraSql is None:
        extraSql = ''
    # 'joiner' lets per-section constraints be and-ed onto extraSql safely.
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []

    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(all=False,
                                                                              extraSql=extraSql,
                                                                              extraMetadata=extraMetadata)

    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves - SNIa-normal are loaded in descWFDBatch
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata,
                  benchmarkArea=benchmarkArea, benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous
    # 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace('SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    # Per-filter count of distinct years with observations.
    displayDict = {'group': 'SRD', 'subgroup': 'Year Coverage', 'order': 0,
                   'caption': 'Number of years with observations.'}
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean, decreasing=True,
                                             metricName='N Seasons (18k) %s' % f)]
        bundleList.append(mb.MetricBundle(metric, slicer, filtersqls[f],
                                          plotDict=plotDict, metadata=filtermetadata[f],
                                          displayDict=displayDict, summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Galaxies
    #########################
    displayDict = {'group': 'Galaxies', 'subgroup': 'Galaxy Counts', 'order': 0, 'caption': None}
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    displayDict['caption'] = 'Number of galaxies across the sky, in i band. Generally, full survey footprint.'
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             metadata=metadata, displayDict=displayDict,
                             summaryMetrics=summary, plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    #########################
    # Cosmology
    #########################
    # note the desc batch does not currently take the extraSql or extraMetadata arguments.
    descBundleDict = descWFDBatch(colmap=colmap, runName=runName, nside=nside)
    for d in descBundleDict:
        bundleList.append(descBundleDict[d])

    #########################
    # Variables and Transients
    #########################
    # Grid of period x magnitude; each bundle tests three periodicity amplitudes.
    displayDict = {'group': 'Variables/Transients', 'subgroup': 'Periodic Stars',
                   'order': 0, 'caption': None}
    for period in [0.5, 1, 2,]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)
            plotDict = {'nTicks': 3, 'colorMin': 0, 'colorMax': 3, 'xMin': 0, 'xMax': 3}
            metadata = combineMetadata('P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                                       extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods, starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                                     displayDict=displayDict, plotDict=plotDict,
                                     plotFuncs=subsetPlots, summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilovnova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    # maskVal=0 counts missing objects as zero in the mean.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=extraMetadata, displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'TDE lightcurves that could be identified'

    metric = TdePopMetric()
    slicer = generateTdePopSlicer()
    sql = ''
    plotDict = {'reduceFunc': np.sum, 'nside': 128}
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             plotDict=plotDict, plotFuncs=plotFuncs,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Microlensing events
    displayDict['subgroup'] = 'Microlensing'
    displayDict['caption'] = 'Fast microlensing events'

    plotDict = {'nside': 128}
    sql = ''
    # Crossing times in days -- TODO confirm units against generateMicrolensingSlicer.
    slicer = generateMicrolensingSlicer(min_crossing_time=1, max_crossing_time=10)
    metric = MicrolensingMetric(metricName='Fast Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()],
                             metadata=extraMetadata, displayDict=displayDict,
                             plotDict=plotDict)
    bundleList.append(bundle)

    displayDict['caption'] = 'Slow microlensing events'
    slicer = generateMicrolensingSlicer(min_crossing_time=100, max_crossing_time=1500)
    metric = MicrolensingMetric(metricName='Slow Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()],
                             metadata=extraMetadata, displayDict=displayDict,
                             plotDict=plotDict)
    bundleList.append(bundle)

    #########################
    # Milky Way
    #########################
    displayDict = {'group': 'Milky Way', 'subgroup': ''}

    # First pass: star counts including the crowding limit.
    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \
                                 'of less than 0.2 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which
        # to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=False,
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'], maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots,
                                 plotDict=plotDict, displayDict=displayDict,
                                 mapsList=[stellar_map])
        bundleList.append(bundle)

    # Second pass: the same counts but with crowding ignored, for comparison.
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, no crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error ' \
                                 'of less than 0.2 mag, not considering crowding' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which
        # to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=True,
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'],
                                      metricName='Nstars_no_crowding', maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots,
                                 plotDict=plotDict, displayDict=displayDict,
                                 mapsList=[stellar_map])
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                # Select only high-res healpixels within ddf_radius of the field center.
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp],
                                                  useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(metricName=survey.survey_name + ', ' + f)
                    summary = [metrics.MedianMetric(metricName='Median depth ' +
                                                    survey.survey_name+', ' + f)]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric, slicer, sql, metadata=filtermetadata[f],
                                             displayDict=displayDict, summaryMetrics=summary,
                                             plotFuncs=[], plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                                         ra_cen=survey.ra, dec_cen=survey.dec,
                                         radius=np.radians(3.), useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name+' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                                         metadata=extraMetadata, displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)
    return bundleDict