def testHistogramMetric(self):
    # Exercise HistogramMetric end-to-end through a MetricBundleGroup run on
    # the fixture self.simData (two sky points with n1 / n2 visits each).
    metric = metrics.HistogramMetric(bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Select the slicepoints where the last histogram bin is unmasked.
    good = np.where(mb.metricValues.mask[:, -1] == False)[0]
    # Default statistic counts visits: n1 fall in the first bin at point 1,
    # n2 in the second bin at point 2.
    expected = np.array([[self.n1, 0.], [0., self.n2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
    # Check that I can run a different statistic
    metric = metrics.HistogramMetric(col='fiveSigmaDepth', statistic='sum',
                                     bins=[0.5, 1.5, 2.5])
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # With statistic='sum' each bin holds the summed fiveSigmaDepth,
    # i.e. m5 * nvisits for the fixture's constant-depth visits.
    expected = np.array([[self.m5_1 * self.n1, 0.], [0., self.m5_2 * self.n2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
def run_maf(dbFile, ra, dec): """Retrive min inter_night gap, and observation history with the input of database file name and arrays of RA and DEC. Note: the observing cadence returned are not ordered by date!! """ # establish connection to sqllite database file. opsimdb = db.OpsimDatabase(dbFile) # While we're in transition between opsim v3 and v4, this may be helpful: print("{dbFile} is an opsim version {version} database".format(dbFile=dbFile, version=opsimdb.opsimVersion)) if opsimdb.opsimVersion == "V3": # For v3 databases: mjdcol = 'expMJD' degrees = False cols = ['filter', 'fiveSigmaDepth', mjdcol, 'expDate'] stackerList = [] else: # For v4 and alternate scheduler databases. mjdcol = 'observationStartMJD' degrees = True cols = ['filter', 'fiveSigmaDepth', mjdcol] stackerList = [expDateStacker()] # IntraNightGapsMetric returns the gap (in days) between observations within the same night custom reduceFunc to find min gaps metric = metrics.cadenceMetrics.IntraNightGapsMetric(reduceFunc=np.amin, mjdCol=mjdcol) # PassMetric just pass all values metric_pass = metrics.simpleMetrics.PassMetric(cols=cols) # slicer for slicing pointing history slicer = slicers.UserPointsSlicer(ra, dec, lonCol='fieldRA', latCol='fieldDec', latLonDeg=degrees) # sql constrains, 3 for baseline2018a, 1 for rolling m2045 sql = '' # bundles to combine metric, slicer and sql constrain together bundle = metricBundles.MetricBundle(metric, slicer, sql) date_bundle = metricBundles.MetricBundle(metric_pass, slicer, sql, stackerList=stackerList) # create metric bundle group and returns bg = metricBundles.MetricBundleGroup( { 'sep': bundle, 'cadence': date_bundle }, opsimdb, outDir=outDir, resultsDb=resultsDb) bg.runAll() opsimdb.close() return bg
def slewSpeeds(colmap=None, runName='opsim', sqlConstraint=None):
    """Generate a set of slew statistics focused on the speeds of each component
    (dome and telescope).

    These slew statistics must be run on the SlewMaxSpeeds table in opsimv4 and
    opsimv3.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names. Note that for these metrics, the column names are
        distinctly different in v3/v4.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    sqlConstraint : str or None, opt
        SQL constraint to apply to metrics. Note this runs on Slew*State table,
        so constraints should generally be based on slew_slewCount.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    # All of these metrics run with a unislicer, on all the slew data.
    slicer = slicers.UniSlicer()
    speeds = ['Dome Alt Speed', 'Dome Az Speed', 'Tel Alt Speed', 'Tel Az Speed',
              'Rotator Speed']
    # 'order' starts at -1 so the first increment below yields 0.
    displayDict = {'group': 'Slew', 'subgroup': 'Slew Speeds', 'order': -1,
                   'caption': None}
    for speed in speeds:
        metadata = combineMetadata(speed, sqlConstraint)
        # Maximum absolute speed.
        metric = metrics.AbsMaxMetric(col=colmap[speed], metricName='Max (Abs)')
        displayDict['caption'] = 'Maximum absolute value of %s.' % speed
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sqlConstraint,
                                 displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)
        # Mean absolute speed.
        metric = metrics.AbsMeanMetric(col=colmap[speed], metricName='Mean (Abs)')
        displayDict['caption'] = 'Mean absolute value of %s.' % speed
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sqlConstraint,
                                 displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)
        # Fraction of slews at the maximum speed.
        metric = metrics.AbsMaxPercentMetric(col=colmap[speed], metricName='% @ Max')
        displayDict['caption'] = 'Percent of slews at the maximum %s (absolute value).' % speed
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sqlConstraint,
                                 displayDict=displayDict, metadata=metadata)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def meanRADec(colmap=None, runName='opsim', extraSql=None, extraMetadata=None):
    """Plot the range of RA/Dec as a function of night.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'night<365')
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.

    Returns
    -------
    tuple of (metricBundleDict, list)
        The bundle dictionary and the (empty) list of plot bundles.
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    plotBundles = []
    group = 'RA Dec coverage'
    subgroup = 'All visits'
    if extraMetadata is not None:
        subgroup = extraMetadata
    displayDict = {'group': group, 'subgroup': subgroup, 'order': 0}
    # Angle-aware metrics for RA (which wraps); plain stats for Dec.
    ra_metrics = [metrics.MeanAngleMetric(colmap['ra']),
                  metrics.FullRangeAngleMetric(colmap['ra'])]
    dec_metrics = [metrics.MeanMetric(colmap['dec']), metrics.MinMetric(colmap['dec']),
                   metrics.MaxMetric(colmap['dec'])]
    for m in ra_metrics:
        slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
        # Match the y-axis range to the units of the database (radians or degrees).
        if not colmap['raDecDeg']:
            plotDict = {'yMin': np.radians(-5), 'yMax': np.radians(365)}
        else:
            plotDict = {'yMin': -5, 'yMax': 365}
        bundle = mb.MetricBundle(m, slicer, extraSql, metadata=extraMetadata,
                                 displayDict=displayDict, plotDict=plotDict)
        bundleList.append(bundle)
    for m in dec_metrics:
        slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
        bundle = mb.MetricBundle(m, slicer, extraSql, metadata=extraMetadata,
                                 displayDict=displayDict)
        bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList), plotBundles
def load_and_run():
    """Run the SNNSNMetric over the DD:COSMOS field of a baseline v1.7 opsim run,
    writing results and plots to a temporary output directory.
    """
    dbFile = 'baseline_nexp2_v1.7_10yrs.db'
    opsimdb = db.OpsimDatabase(dbFile)
    runName = dbFile.replace('.db', '')
    nside = 64
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = SNNSNMetric(verbose=False)  # , zlim_coeff=0.98)
    bundleList = []
    # sql = ''
    # Restrict to the COSMOS deep-drilling field only.
    sql = '(note = "%s")' % ('DD:COSMOS')
    bundleList.append(metricBundles.MetricBundle(metric, slicer, sql, runName=runName))
    outDir = 'temp'
    resultsDb = db.ResultsDb(outDir=outDir)
    bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
    bgroup = metricBundles.MetricBundleGroup(bundleDict, opsimdb, outDir=outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
def compute_metric(params):
    """Execute the CadenceOverVisibilityWindowMetric when code is called from
    the commandline.

    Parameters
    ----------
    params : dict
        Must contain 'survey' (proposal tag to select), plus 'filters',
        'cadence', 'start_date' and 'end_date' passed through to the metric.
    """
    # Hard-coded database and output locations for commandline use.
    obsdb = db.OpsimDatabase('../../tutorials/baseline2018a.db')
    outputDir = '/home/docmaf/'
    resultsDb = db.ResultsDb(outDir=outputDir)
    # Build an SQL constraint selecting visits belonging to the requested survey.
    (propids, proptags) = obsdb.fetchPropInfo()
    surveyWhere = obsdb.createSQLWhere(params['survey'], proptags)
    obs_params = {'filters': params['filters'],
                  'cadence': params['cadence'],
                  'start_date': params['start_date'],
                  'end_date': params['end_date']}
    metric = CadenceOverVisibilityWindowMetric(**obs_params)
    slicer = slicers.HealpixSlicer(nside=64)
    sqlconstraint = surveyWhere
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint)
    bgroup = metricBundles.MetricBundleGroup({0: bundle}, obsdb,
                                             outDir='newmetric_test',
                                             resultsDb=resultsDb)
    bgroup.runAll()
def tdcBatch(colmap=None, runName='opsim', nside=64, accuracyThreshold=0.04,
             extraSql=None, extraMetadata=None):
    """Run the TDC (time-delay cosmography / lens time delay) metric.

    Parameters
    ----------
    colmap : dict or None, opt
        Column-name mapping; defaults to the 'fbs' map.
    runName : str, opt
        Name of the simulated survey. Default "opsim".
    nside : int, opt
        Healpix resolution for the slicer. Default 64.
    accuracyThreshold : float, opt
        Currently unused here -- TODO confirm intended use before removal.
    extraSql, extraMetadata : str or None, opt
        Optional additional constraint / metadata.

    Returns
    -------
    metricBundleDict
    """
    # The options to add additional sql constraints are removed for now.
    if colmap is None:
        colmap = ColMapDict('fbs')
    # Calculate a subset of DESC WFD-related metrics.
    displayDict = {'group': 'Strong Lensing'}
    displayDict['subgroup'] = 'Lens Time Delay'
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    summaryMetrics = [metrics.MeanMetric(), metrics.MedianMetric(), metrics.RmsMetric()]
    # Ideally need a way to do better on calculating the summary metrics for the
    # high accuracy area.
    slicer = slicers.HealpixSlicer(nside=nside)
    tdcMetric = metrics.TdcMetric(metricName='TDC', nightCol=colmap['night'],
                                  expTimeCol=colmap['exptime'], mjdCol=colmap['mjd'])
    bundle = mb.MetricBundle(tdcMetric, slicer, constraint=extraSql,
                             metadata=extraMetadata, displayDict=displayDict,
                             plotFuncs=subsetPlots, summaryMetrics=summaryMetrics)
    bundleList = [bundle]
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def nvisitsPerNight(colmap=None, runName='opsim', binNights=1, sqlConstraint=None,
                    metadata=None):
    """Count the number of visits per night through the survey.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    binNights : int, opt
        Number of nights to count in each bin. Default = 1, count number of
        visits in each night.
    sqlConstraint : str or None, opt
        Additional constraint to add to any sql constraints
        (e.g. 'propId=1' or 'fieldID=522'). Default None, for no additional
        constraints.
    metadata : str or None, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    subgroup = metadata
    if subgroup is None:
        subgroup = 'All visits'
    # Build a human-readable caption fragment from whatever was supplied.
    metadataCaption = metadata
    if metadata is None:
        if sqlConstraint is not None:
            metadataCaption = sqlConstraint
        else:
            metadataCaption = 'all visits'
    bundleList = []
    displayDict = {'group': 'Per Night', 'subgroup': subgroup}
    displayDict['caption'] = 'Number of visits per night for %s.' % (metadataCaption)
    displayDict['order'] = 0
    metric = metrics.CountMetric(colmap['mjd'], metricName='Nvisits')
    # Slice on MJD with a bin width of binNights days (1 day == 1 night).
    slicer = slicers.OneDSlicer(sliceColName=colmap['mjd'], binsize=int(binNights))
    bundle = mb.MetricBundle(metric, slicer, sqlConstraint, metadata=metadata,
                             displayDict=displayDict, summaryMetrics=standardSummary())
    bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def __getOpSimMjd(self, opsim, ra, dec, fil):
    """Return the sorted MJDs of all visits to (ra, dec) in a single filter.

    Parameters
    ----------
    opsim : str
        Path to the opsim sqlite database.
    ra, dec : float
        Coordinates of the point of interest.
    fil : str
        Filter name used in the SQL constraint (e.g. 'r').

    Returns
    -------
    numpy.ndarray
        Sorted array of observationStartMJD values.
    """
    colmn = 'observationStartMJD'
    opsdb = db.OpsimDatabase(opsim)
    # Directory where tmp files are going to be stored TODO eliminate - this
    outDir = 'TmpDir'
    resultsDb = db.ResultsDb(outDir=outDir)
    # PassMetric returns the raw visit rows; we only need the MJD column.
    metric = metrics.PassMetric(cols=[colmn, 'fiveSigmaDepth', 'filter'])
    slicer = slicers.UserPointsSlicer(ra=ra, dec=dec)
    sqlconstraint = 'filter = \'' + fil + '\''
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint, runName='name')
    bgroup = mb.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir,
                                  resultsDb=resultsDb)
    bgroup.runAll()
    # Fix: removed an unused 'filters = np.unique(...)' local that was computed
    # and never read.
    mv = bundle.metricValues[0]
    # Get dates
    mjd = mv[colmn]
    mjd = np.sort(mjd)
    print('Num of visits ' + str(len(mjd)) + ' ' + opsim)
    return mjd
def altazLambert(colmap=None, runName='opsim', extraSql=None, extraMetadata=None,
                 metricName='Nvisits as function of Alt/Az'):
    """Generate a set of metrics measuring the number visits as a function of
    alt/az plotted on a LambertSkyMap.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints
        (e.g. 'propId=1' or 'fieldID=522'). Default None, for no additional
        constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.
    metricName : str, opt
        Unique name to assign to metric

    Returns
    -------
    metricBundleDict
    """
    colmap, slicer, metric = basicSetup(metricName=metricName, colmap=colmap)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(all=True,
                                                            extraSql=extraSql,
                                                            extraMetadata=extraMetadata)
    bundleList = []
    plotFunc = plots.LambertSkyMap()
    for f in filterlist:
        # Bug fix: was `f is 'all'` -- 'is' tests object identity and only
        # happens to work for interned strings; use equality instead.
        if f == 'all':
            subgroup = 'All Observations'
        else:
            subgroup = 'Per filter'
        displayDict = {'group': 'Alt/Az', 'order': orders[f], 'subgroup': subgroup,
                       'caption': 'Alt/Az pointing distribution for filter %s' % f}
        bundle = mb.MetricBundle(metric, slicer, sqls[f], runName=runName,
                                 metadata=metadata[f], plotFuncs=[plotFunc],
                                 displayDict=displayDict)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def testHistogramM5Metric(self):
    # HistogramM5Metric should produce the coadded 5-sigma depth per bin,
    # matching Coaddm5Metric run on equivalent synthetic slices.
    metric = metrics.HistogramM5Metric(bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    # Clobber the stacker that gets auto-added
    mb.stackerList = []
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Slicepoints where at least one of the two bins has data.
    good = np.where((mb.metricValues.mask[:, 0] == False) |
                    (mb.metricValues.mask[:, 1] == False))[0]
    # Compute the reference coadded depths directly with Coaddm5Metric.
    checkMetric = metrics.Coaddm5Metric()
    tempSlice = np.zeros(self.n1, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_1
    val1 = checkMetric.run(tempSlice)
    tempSlice = np.zeros(self.n2, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_2
    val2 = checkMetric.run(tempSlice)
    # -666 is the fill value used for empty bins.
    expected = np.array([[val1, -666.], [-666., val2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
def runChips(useCamera=False):
    """Run a visit-count metric with or without the camera footprint, and save
    a gnomonic-projection plot of the result.

    Parameters
    ----------
    useCamera : bool, opt
        If True, the HealpixSlicer applies the camera chip/raft geometry when
        matching visits to healpixels. Output files are tagged 'T' or 'F'
        accordingly.
    """
    import numpy as np
    import lsst.sims.maf.slicers as slicers
    import lsst.sims.maf.metrics as metrics
    import lsst.sims.maf.metricBundles as metricBundles
    import lsst.sims.maf.db as db
    from lsst.sims.maf.plots import PlotHandler
    import matplotlib.pylab as plt
    import healpy as hp

    # Bug fix: was a Python 2 `print` statement, a syntax error under Python 3
    # (the rest of this file uses the print function).
    print('Camera setting = ', useCamera)
    database = 'enigma_1189_sqlite.db'
    sqlWhere = 'filter = "r" and night < 800 and fieldRA < %f and fieldDec > %f and fieldDec < 0' % (np.radians(15), np.radians(-15))
    opsdb = db.OpsimDatabase(database)
    outDir = 'Camera'
    resultsDb = db.ResultsDb(outDir=outDir)
    nside = 512
    # Tag the output with T/F according to the camera setting.
    tag = 'F'
    if useCamera:
        tag = 'T'
    metric = metrics.CountMetric('expMJD', metricName='chipgap_%s' % tag)
    slicer = slicers.HealpixSlicer(nside=nside, useCamera=useCamera)
    bundle1 = metricBundles.MetricBundle(metric, slicer, sqlWhere)
    bg = metricBundles.MetricBundleGroup({0: bundle1}, opsdb, outDir=outDir,
                                         resultsDb=resultsDb)
    bg.runAll()
    hp.gnomview(bundle1.metricValues, xsize=800, ysize=800, rot=(7, -7, 0),
                unit='Count', min=1)
    plt.savefig(outDir + '/fig' + tag + '.png')
def testOut(self):
    """
    Check that the metric bundle can generate the expected output
    """
    slicer = slicers.HealpixSlicer(nside=8)
    metric = metrics.MeanMetric(col='airmass')
    sql = 'filter="r"'
    metricB = metricBundles.MetricBundle(metric, slicer, sql)
    # Use the test database shipped with sims_maf.
    filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')
    database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
    opsdb = db.OpsimDatabase(database=database)
    resultsDb = db.ResultsDb(outDir=self.outDir)
    bgroup = metricBundles.MetricBundleGroup({0: metricB}, opsdb,
                                             outDir=self.outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    bgroup.writeAll()
    # Collect the generated output files.
    outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
    outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
    outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))
    # By default, make 3 plots for healpix
    assert (len(outThumbs) == 3)
    assert (len(outPdf) == 3)
    assert (len(outNpz) == 1)
def filtersPerNight(colmap=None, runName='opsim', nights=1, extraSql=None,
                    extraMetadata=None):
    """Generate a set of metrics measuring the number and rate of filter changes
    over a given span of nights.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names.
    run_name : str, opt
        The name of the simulated survey. Default is "opsim".
    nights : int, opt
        Size of night bin to use when calculating metrics. Default is 1.
    extraSql : str, opt
        Additional constraint to add to any sql constraints
        (e.g. 'propId=1' or 'fieldID=522'). Default None, for no additional
        constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    # Set up sql and metadata, if passed any additional information.
    sql = ''
    metadata = 'Per'
    if nights == 1:
        metadata += ' Night'
    else:
        metadata += ' %s Nights' % nights
    metacaption = metadata.lower()
    if (extraSql is not None) and (len(extraSql) > 0):
        sql = extraSql
        if extraMetadata is None:
            metadata += ' %s' % extraSql
            metacaption += ', with %s selection' % extraSql
    if extraMetadata is not None:
        metadata += ' %s' % extraMetadata
        metacaption += ', %s only' % extraMetadata
    metacaption += '.'
    displayDict = {'group': 'Filter Changes', 'subgroup': metadata}
    summaryStats = standardSummary()
    # One bin per `nights` nights.
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=nights)
    metricList, captionList = setupMetrics(colmap)
    for m, caption in zip(metricList, captionList):
        displayDict['caption'] = caption + metacaption
        bundle = mb.MetricBundle(m, slicer, sql, runName=runName,
                                 metadata=metadata, displayDict=displayDict,
                                 summaryMetrics=summaryStats)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def filtersWholeSurveyBatch(colmap=None, runName='opsim', extraSql=None,
                            extraMetadata=None):
    """Generate a set of metrics measuring the number and rate of filter changes
    over the entire survey.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names.
    run_name : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints
        (e.g. 'propId=1' or 'fieldID=522'). Default None, for no additional
        constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    # Set up sql and metadata, if passed any additional information.
    sql = ''
    metadata = 'Whole Survey'
    metacaption = 'over the whole survey'
    if (extraSql is not None) and (len(extraSql) > 0):
        sql = extraSql
        if extraMetadata is None:
            metadata += ' %s' % extraSql
            # Typo fix: was 'selction'; matches filtersPerNight's caption text.
            metacaption += ', with %s selection' % extraSql
    if extraMetadata is not None:
        metadata += ' %s' % extraMetadata
        metacaption += ', %s only' % (extraMetadata)
    metacaption += '.'
    displayDict = {'group': 'Filter Changes', 'subgroup': metadata}
    # Whole-survey stats: a single slice over all visits.
    slicer = slicers.UniSlicer()
    metricList, captionList = setupMetrics(colmap)
    for m, caption in zip(metricList, captionList):
        displayDict['caption'] = caption + metacaption
        bundle = mb.MetricBundle(m, slicer, sql, runName=runName,
                                 metadata=metadata, displayDict=displayDict)
        bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def testRunRegularToo(self):
    """
    Test that a binned slicer and a regular slicer can run together
    """
    constraint = ''
    # One 2-d (binned) bundle and one ordinary bundle on identical slicers.
    binnedBundle = metricBundle.MetricBundle(
        metrics.AccumulateM5Metric(bins=[0.5, 1.5, 2.5]),
        slicers.HealpixSlicer(nside=16), constraint)
    plainBundle = metricBundle.MetricBundle(
        metrics.Coaddm5Metric(), slicers.HealpixSlicer(nside=16), constraint)
    bundles = [binnedBundle, plainBundle]
    group = metricBundle.MetricBundleGroup(
        metricBundle.makeBundlesDictFromList(bundles), None, saveEarly=False)
    group.setCurrent(constraint)
    group.runCurrent(constraint, simData=self.simData)
    # The final accumulated-M5 column must agree with the coadded depth.
    assert (np.array_equal(binnedBundle.metricValues[:, 1].compressed(),
                           plainBundle.metricValues.compressed()))
def slewAngles(colmap=None, runName='opsim', sqlConstraint=None):
    """Generate a set of slew statistics focused on the angles of each component
    (dome and telescope).

    These slew statistics must be run on the SlewFinalState or SlewInitialState
    table in opsimv4, and on the SlewState table in opsimv3.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4
        column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    sqlConstraint : str or None, opt
        SQL constraint to apply to metrics. Note this runs on Slew*State table,
        so constraints should generally be based on slew_slewCount.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    # All of these metrics are run with a unislicer.
    slicer = slicers.UniSlicer()
    # For each angle, we will compute mean/median/min/max and rms.
    # Note that these angles can range over more than 360 degrees, because of
    # cable wrap.
    # This is why we're not using the Angle metrics - here 380 degrees is NOT
    # the same as 20 deg.
    # Stats for angle:
    angles = ['Tel Alt', 'Tel Az', 'Rot Tel Pos']
    # 'order' starts at -1 so the first increment below yields 0.
    displayDict = {'group': 'Slew', 'subgroup': 'Slew Angles', 'order': -1,
                   'caption': None}
    for angle in angles:
        metadata = angle
        metriclist = standardMetrics(colmap[angle], replace_colname='')
        metriclist += [metrics.RmsMetric(colmap[angle], metricName='RMS')]
        for metric in metriclist:
            displayDict['caption'] = '%s %s' % (metric.name, angle)
            displayDict['order'] += 1
            bundle = mb.MetricBundle(metric, slicer, sqlConstraint,
                                     displayDict=displayDict, metadata=metadata)
            bundleList.append(bundle)
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def fOBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None,
            nside=64, benchmarkArea=18000, benchmarkNvisits=825):
    """Run the fO (area vs. number of visits) metric with standard summary stats.

    Parameters
    ----------
    colmap : dict or None, opt
        Column-name mapping; defaults to OpsimV4 names.
    runName : str, opt
        Name of the simulated survey. Default "opsim".
    extraSql, extraMetadata : str or None, opt
        Optional additional SQL constraint / metadata label.
    nside : int, opt
        Healpix resolution. Default 64.
    benchmarkArea : float, opt
        Benchmark sky area (sq deg) for the fO summary metrics. Default 18000.
    benchmarkNvisits : int, opt
        Benchmark number of visits for the fO summary metrics. Default 825.

    Returns
    -------
    metricBundleDict
    """
    # Allow user to add dithering.
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    sql = ''
    metadata = 'All visits'
    # Add additional sql constraint (such as wfdWhere) and metadata, if provided.
    if (extraSql is not None) and (len(extraSql) > 0):
        sql = extraSql
        if extraMetadata is None:
            # Derive a readable metadata label from the sql constraint.
            metadata = extraSql.replace('filter =', '').replace('filter=', '')
            metadata = metadata.replace('"', '').replace("'", '')
    if extraMetadata is not None:
        metadata = extraMetadata
    subgroup = metadata
    raCol = colmap['ra']
    decCol = colmap['dec']
    degrees = colmap['raDecDeg']
    # Set up fO metric.
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol,
                                   latLonDeg=degrees)
    displayDict = {'group': 'FO metrics', 'subgroup': subgroup, 'order': 0}
    # Configure the count metric which is what is used for f0 slicer.
    metric = metrics.CountMetric(col=colmap['mjd'], metricName='fO')
    plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkArea,
                'Nvisit': benchmarkNvisits, 'xMin': 0, 'xMax': 1500}
    # fOArea/fONv summaries, both raw and normalized against the benchmark.
    summaryMetrics = [metrics.fOArea(nside=nside, norm=False,
                                     metricName='fOArea: Nvisits (#)',
                                     Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fOArea(nside=nside, norm=True,
                                     metricName='fOArea: Nvisits/benchmark',
                                     Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fONv(nside=nside, norm=False,
                                   metricName='fONv: Area (sqdeg)',
                                   Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fONv(nside=nside, norm=True,
                                   metricName='fONv: Area/benchmark',
                                   Asky=benchmarkArea, Nvisit=benchmarkNvisits)]
    caption = 'The FO metric evaluates the overall efficiency of observing. '
    caption += ('fOArea: Nvisits = %.1f sq degrees receive at least this many visits out of %d. '
                % (benchmarkArea, benchmarkNvisits))
    caption += ('fONv: Area = this many square degrees out of %.1f receive at least %d visits.'
                % (benchmarkArea, benchmarkNvisits))
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             displayDict=displayDict, summaryMetrics=summaryMetrics,
                             plotFuncs=[plots.FOPlot()], metadata=metadata)
    bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def testOpsim2dSlicer(self):
    """Run AccumulateCountMetric through the OpsimFieldSlicer and check the
    per-field running totals."""
    bundle = metricBundle.MetricBundle(
        metrics.AccumulateCountMetric(bins=[0.5, 1.5, 2.5]),
        slicers.OpsimFieldSlicer(), '')
    group = metricBundle.MetricBundleGroup({0: bundle}, None, saveEarly=False)
    group.setCurrent('')
    # OpsimFieldSlicer requires the field definitions as well as the visits.
    group.fieldData = self.fieldData
    group.runCurrent('', simData=self.simData)
    # Field 1 accumulates n1 in both bins; field 2 has no early data (-666 fill)
    # then reaches n2.
    expected = np.array([[self.n1, self.n1], [-666., self.n2]])
    assert (np.array_equal(bundle.metricValues.data, expected))
def testHealpix2dSlicer(self):
    """Run AccumulateCountMetric through the HealpixSlicer and check the
    accumulated counts at the populated healpixels."""
    bundle = metricBundle.MetricBundle(
        metrics.AccumulateCountMetric(bins=[0.5, 1.5, 2.5]),
        slicers.HealpixSlicer(nside=16), '')
    group = metricBundle.MetricBundleGroup({0: bundle}, None, saveEarly=False)
    group.setCurrent('')
    group.runCurrent('', simData=self.simData)
    # Healpixels whose final bin is unmasked are the ones that received visits.
    populated = np.where(bundle.metricValues.mask[:, -1] == False)[0]
    # Point 1 accumulates n1 in both bins; point 2 has no early data (-666 fill)
    # then reaches n2.
    expected = np.array([[self.n1, self.n1], [-666., self.n2]])
    assert (np.array_equal(bundle.metricValues.data[populated, :], expected))
def testAccumulateMetric(self):
    # AccumulateMetric should build a running sum of fiveSigmaDepth per bin.
    metric = metrics.AccumulateMetric(col='fiveSigmaDepth', bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    # Clobber the stacker that gets auto-added
    mb.stackerList = []
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Slicepoints with data in the final bin.
    good = np.where(mb.metricValues.mask[:, -1] == False)[0]
    # Point 1: sum reaches n1*m5_1 in both bins; point 2: no early data
    # (-666 fill) then n2*m5_2.
    expected = np.array([[self.n1*self.m5_1, self.n1*self.m5_1],
                         [-666., self.n2 * self.m5_2]])
    assert(np.array_equal(mb.metricValues.data[good, :], expected))
def testOut(self):
    """
    Check that the metric bundle can generate the expected output
    """
    nside = 8
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.MeanMetric(col='airmass')
    sql = 'filter="r"'
    # Include both stackers and maps in the bundle to exercise those paths.
    stacker1 = stackers.RandomDitherFieldPerVisitStacker()
    stacker2 = stackers.GalacticStacker()
    map1 = maps.GalCoordsMap()
    map2 = maps.StellarDensityMap()
    metricB = metricBundles.MetricBundle(metric, slicer, sql,
                                         stackerList=[stacker1, stacker2],
                                         mapsList=[map1, map2])
    # Use the v4 test database shipped with sims_data.
    database = os.path.join(getPackageDir('sims_data'), 'OpSimData',
                            'astro-lsst-01_2014.db')
    opsdb = db.OpsimDatabaseV4(database=database)
    resultsDb = db.ResultsDb(outDir=self.outDir)
    bgroup = metricBundles.MetricBundleGroup({0: metricB}, opsdb,
                                             outDir=self.outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    bgroup.writeAll()
    opsdb.close()
    # Collect the generated output files.
    outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
    outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
    outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))
    # By default, make 3 plots for healpix
    assert (len(outThumbs) == 3)
    assert (len(outPdf) == 3)
    assert (len(outNpz) == 1)
def testAccumulateM5Metric(self):
    # AccumulateM5Metric should build a running coadded depth per bin, matching
    # Coaddm5Metric run on equivalent synthetic slices.
    metric = metrics.AccumulateM5Metric(bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    good = np.where(mb.metricValues.mask[:, -1] == False)[0]
    checkMetric = metrics.Coaddm5Metric()
    # Bug fix: a bare zip object is not a valid numpy dtype under Python 3;
    # wrap in list(...) as the sibling testHistogramM5Metric already does.
    tempSlice = np.zeros(self.n1, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_1
    val1 = checkMetric.run(tempSlice)
    tempSlice = np.zeros(self.n2, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_2
    val2 = checkMetric.run(tempSlice)
    # Point 1 reaches val1 in both bins; point 2 has no early data (-666 fill).
    expected = np.array([[val1, val1], [-666., val2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
def get_cadence(ra, dec, b, snrLimit, nPtsLimit, filters, outDir, opsimdb, resultsDb):
    """Extract and plot the observing cadence (visit times and 5-sigma depths,
    per filter) at a single dithered sky position.

    Parameters
    ----------
    ra, dec : float
        Coordinates of the point of interest.
    b : float
        Galactic latitude label used only in the output plot filename.
    snrLimit, nPtsLimit :
        Currently unused here -- TODO confirm intended use before removal.
    filters : iterable of str
        Filter names to extract and plot.
    outDir : str
        Output directory for the MetricBundleGroup.
    opsimdb, resultsDb :
        Open opsim database connection and results database.

    Returns
    -------
    (times, depths) : tuple of lists
        One array per filter: days since first visit, and fiveSigmaDepth.
    """
    # The pass metric just passes data straight through.
    metric = metrics.PassMetric(cols=['filter', 'fiveSigmaDepth', 'expMJD'])
    slicer = slicers.UserPointsSlicer(ra, dec, lonCol='ditheredRA',
                                      latCol='ditheredDec')
    sql = ''
    bundle = metricBundles.MetricBundle(metric, slicer, sql)
    bg = metricBundles.MetricBundleGroup({0: bundle}, opsimdb, outDir=outDir,
                                         resultsDb=resultsDb)
    bg.runAll()
    # (Removed a no-op bare expression that read the 'filter' column and
    # discarded the result.)
    print("Plotting...")
    colors = {'u': 'cyan', 'g': 'g', 'r': 'y', 'i': 'r', 'z': 'm', 'y': 'k'}
    # Reference all visit times to the first visit.
    dayZero = bundle.metricValues.data[0]['expMJD'].min()
    times = []
    depths = []
    plt.clf()
    for fname in filters:
        good = np.where(bundle.metricValues.data[0]['filter'] == fname)
        times.append(bundle.metricValues.data[0]['expMJD'][good] - dayZero)
        depths.append(bundle.metricValues.data[0]['fiveSigmaDepth'][good])
        plt.scatter(bundle.metricValues.data[0]['expMJD'][good] - dayZero,
                    bundle.metricValues.data[0]['fiveSigmaDepth'][good],
                    c=colors[fname], label=fname)
    plt.xlabel('Day')
    # Raw string: '\s' is an invalid escape sequence (SyntaxWarning on modern
    # Python); the rendered label text is unchanged.
    plt.ylabel(r'5$\sigma$ depth')
    plt.legend(scatterpoints=1, loc="upper left", bbox_to_anchor=(1, 1))
    plt.savefig("l45b{0}_cadence.pdf".format(int(b)))
    return times, depths
def testOut(self):
    """
    Check that the metric bundle can generate the expected output
    """
    nside = 8
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.MeanMetric(col='airmass')
    sql = 'filter="r"'
    stacker1 = stackers.RandomDitherFieldPerVisitStacker()
    stacker2 = stackers.GalacticStacker()
    # NOTE(review): map1/map2 are created but not passed to the bundle
    # (no mapsList argument) -- confirm whether that is intentional.
    map1 = maps.GalCoordsMap()
    map2 = maps.StellarDensityMap()
    metricB = metricBundles.MetricBundle(metric, slicer, sql,
                                         stackerList=[stacker1, stacker2])
    # Use the test database shipped with sims_maf.
    filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')
    database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
    opsdb = db.OpsimDatabase(database=database)
    resultsDb = db.ResultsDb(outDir=self.outDir)
    bgroup = metricBundles.MetricBundleGroup({0: metricB}, opsdb,
                                             outDir=self.outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    bgroup.writeAll()
    # Collect the generated output files.
    outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
    outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
    outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))
    # By default, make 3 plots for healpix
    assert (len(outThumbs) == 3)
    assert (len(outPdf) == 3)
    assert (len(outNpz) == 1)
def makeBundleList(dbFile, night=1, nside=64, latCol='ditheredDec', lonCol='ditheredRA'):
    """
    Make a bundleList of things to run

    Parameters
    ----------
    dbFile : str
        Opsim database file (currently unused in this function -- TODO confirm).
    night : int, opt
        Night number to select. Default 1.
    nside : int, opt
        Healpix resolution for the sky maps. Default 64.
    latCol, lonCol : str, opt
        Dec/RA columns to slice on (dithered by default).

    Returns
    -------
    metricBundleDict
    """
    # Construct sql queries for each filter and all filters
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    sqls = ['night=%i and filter="%s"' % (night, f) for f in filters]
    sqls.append('night=%i' % night)
    bundleList = []
    plotFuncs_lam = [plots.LambertSkyMap()]
    reg_slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol,
                                       latLonDeg=False)
    altaz_slicer = slicers.HealpixSlicer(nside=nside, latCol='altitude',
                                         latLonDeg=False, lonCol='azimuth',
                                         useCache=False)
    unislicer = slicers.UniSlicer()
    # Per-filter (and all-filter) maps and counts.
    for sql in sqls:
        # Number of exposures
        metric = metrics.CountMetric('expMJD', metricName='N visits')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)
        metric = metrics.CountMetric('expMJD', metricName='N visits alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql,
                                            plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)
        metric = metrics.MeanMetric('expMJD', metricName='Mean Visit Time')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)
        metric = metrics.MeanMetric('expMJD', metricName='Mean Visit Time alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql,
                                            plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)
        metric = metrics.CountMetric('expMJD', metricName='N_visits')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql)
        bundleList.append(bundle)
    # Need pairs in window to get a map of how well it gathered SS pairs.
    # Moon phase.
    # Whole-night summary statistics.
    metric = metrics.NChangesMetric(col='filter', metricName='Filter Changes')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.OpenShutterFractionMetric()
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MeanMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MinMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MaxMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    # Make plots of the solar system pairs that were taken in the night
    metric = metrics.PairMetric()
    sql = 'night=%i and (filter ="r" or filter="g" or filter="i")' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)
    metric = metrics.PairMetric(metricName='z Pairs')
    sql = 'night=%i and filter="z"' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)
    # Plot up each visit
    metric = metrics.NightPointingMetric()
    slicer = slicers.UniSlicer()
    # Fix: was a redundant chained assignment ("sql = sql = ...").
    sql = 'night=%i' % night
    plotFuncs = [plots.NightPointingPlotter()]
    bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=plotFuncs)
    bundleList.append(bundle)
    return metricBundles.makeBundlesDictFromList(bundleList)
def makeBundleList(dbFile, night=1, nside=64, latCol='fieldDec', lonCol='fieldRA',
                   notes=True, colmap=None):
    """Make a bundleList of per-night summary metrics to run (opsim v4 column names).

    Parameters
    ----------
    dbFile : str
        Path to the opsim database. Not used directly in this function;
        kept for a uniform call signature with the driver code.
    night : int, opt
        Survey night number used to build the sql constraints. Default 1.
    nside : int, opt
        Healpix nside for the healpix slicers. Default 64.
    latCol : str, opt
        Latitude (Dec) column for the sky slicer. Default 'fieldDec'.
    lonCol : str, opt
        Longitude (RA) column for the sky slicer. Default 'fieldRA'.
    notes : bool, opt
        If True, also add count/percent statistics on the 'note' column.
    colmap : dict, opt
        Column-name mapping; defaults to ColMapDict('opsimV4').

    Returns
    -------
    dict
        Dictionary of MetricBundles from metricBundles.makeBundlesDictFromList.
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    # Take the mjd column from the colmap so a caller-supplied mapping is
    # honored everywhere, not just in the Hourglass metric below.
    # (For the default 'opsimV4' map this is 'observationStartMJD', as before.)
    mjdCol = colmap['mjd']
    # NOTE(review): alt/az columns remain hardcoded; if the colmap carries
    # alt/az keys these should arguably come from it too — confirm.
    altCol = 'altitude'
    azCol = 'azimuth'

    # Construct sql queries for each filter, plus one for all filters combined.
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    sqls = ['night=%i and filter="%s"' % (night, f) for f in filters]
    sqls.append('night=%i' % night)

    bundleList = []
    plotFuncs_lam = [plots.LambertSkyMap()]

    # Hourglass
    hourslicer = slicers.HourglassSlicer()
    displayDict = {'group': 'Hourglass'}
    md = ''
    sql = 'night=%i' % night
    metric = metrics.HourglassMetric(nightCol=colmap['night'], mjdCol=colmap['mjd'],
                                     metricName='Hourglass')
    bundle = metricBundles.MetricBundle(metric, hourslicer, constraint=sql, metadata=md,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # Slicers: sky map, alt/az map (cache off), and UniSlicer for scalars.
    reg_slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol,
                                       latLonDeg=True)
    altaz_slicer = slicers.HealpixSlicer(nside=nside, latCol=altCol, latLonDeg=True,
                                         lonCol=azCol, useCache=False)
    unislicer = slicers.UniSlicer()
    for sql in sqls:
        # Number of exposures
        metric = metrics.CountMetric(mjdCol, metricName='N visits')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)

        metric = metrics.CountMetric(mjdCol, metricName='N visits alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql, plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)

        metric = metrics.MeanMetric(mjdCol, metricName='Mean Visit Time')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)

        metric = metrics.MeanMetric(mjdCol, metricName='Mean Visit Time alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql, plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)

        metric = metrics.CountMetric(mjdCol, metricName='N_visits')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql)
        bundleList.append(bundle)

    # Need pairs in window to get a map of how well it gathered SS pairs.
    # Moon phase.

    # Whole-night scalar statistics (all filters).
    metric = metrics.NChangesMetric(col='filter', metricName='Filter Changes')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)

    metric = metrics.BruteOSFMetric()
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)

    metric = metrics.MeanMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)

    metric = metrics.MinMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)

    metric = metrics.MaxMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)

    # Make plots of the solar system pairs that were taken in the night
    metric = metrics.PairMetric(mjdCol=mjdCol)
    sql = 'night=%i and (filter ="r" or filter="g" or filter="i")' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)

    metric = metrics.PairMetric(mjdCol=mjdCol, metricName='z Pairs')
    sql = 'night=%i and filter="z"' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)

    # Plot up each visit
    metric = metrics.NightPointingMetric(mjdCol=mjdCol)
    slicer = slicers.UniSlicer()
    sql = 'night=%i' % night
    plotFuncs = [plots.NightPointingPlotter()]
    bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=plotFuncs)
    bundleList.append(bundle)

    # stats from the note column (sql here is still the whole-night constraint)
    if notes:
        displayDict = {'group': 'Basic Stats', 'subgroup': 'Percent stats'}
        metric = metrics.StringCountMetric(col='note', percent=True, metricName='Percents')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql, displayDict=displayDict)
        bundleList.append(bundle)
        displayDict['subgroup'] = 'Count Stats'
        metric = metrics.StringCountMetric(col='note', metricName='Counts')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql, displayDict=displayDict)
        bundleList.append(bundle)

    return metricBundles.makeBundlesDictFromList(bundleList)
def ResultadosNtotBolV2(FBS, mod):
    """Run the bolometric NtotMetricV2 metric over every opsim run of an FBS release.

    Builds one WFD bundle (standard WFD footprint, no galactic plane) and one
    DDF bundle for NtotMetricV2, then runs both against each opsim database
    found for the given FBS release (skipping 'noddf' runs), writing results
    under /data/agonzalez/output_FBS_{FBS}/.

    Parameters
    ----------
    FBS : str
        Feature Based Scheduler release tag (e.g. "1.5"); selects
        ./lsst_cadence/FBS_{FBS}/ as the database directory.
    mod : str
        Model variant passed through to NtotMetricV2 (e.g. "A").
    """
    print("FBS usado:", FBS)
    print("mod:", mod)

    # ------------------------------- bundles -------------------------------
    metric = NtotMetricV2(mod, f1f2diff=2)

    # WFD: standard WFD footprint without the galactic plane, evaluated only
    # on the healpixels flagged by WFD_no_gp_healpixels.
    constraint1 = "note NOT LIKE '%DD%'"
    wfd_standard = schedUtils.WFD_no_gp_healpixels(64)
    # hpid = the subset of healpix ids used to calculate the metric.
    slicer1 = slicers.HealpixSubsetSlicer(64, np.where(wfd_standard == 1)[0])
    bundle1 = mb.MetricBundle(metric, slicer1, constraint1)

    # DDF: all visits tagged as deep-drilling.
    constraint2 = "note LIKE '%DD%'"
    slicer2 = slicers.HealpixSlicer(nside=64)
    bundle2 = mb.MetricBundle(metric, slicer2, constraint2)

    print("==============================================")
    print("constraint WFD:" + constraint1)
    print("constraint DDF:" + constraint2)

    # ----------------------------- directories -----------------------------
    # List the available database directories.
    show_fbs_dirs()
    dbDir = './lsst_cadence/FBS_{}/'.format(FBS)
    outDir = '/data/agonzalez/output_FBS_{}/bolNtot_mod{}_FINAL/'.format(
        FBS, mod)
    # exist_ok=True already tolerates a pre-existing directory, so no
    # separate os.path.exists() guard is needed.
    os.makedirs(os.path.abspath(outDir), exist_ok=True)
    opSimDbs, resultDbs = connect_dbs(dbDir, outDir)
    metricDataPath = '/data/agonzalez/output_FBS_{}/bolNtot_mod{}_FINAL/MetricData/'.format(
        FBS, mod)
    os.makedirs(os.path.abspath(metricDataPath), exist_ok=True)
    print("===================================================")
    print("dbDir :", dbDir)
    print("outDir :", outDir)
    print("metricDataPath :", metricDataPath)
    print("===================================================")

    # ---------------------------- bundle group -----------------------------
    dbRuns = show_opsims(dbDir)
    print(dbRuns)
    # Skip runs without deep-drilling fields.
    dbRuns = [run for run in dbRuns if "noddf" not in run]
    # The dict contents are loop-invariant; build it once. setRunName below
    # re-targets the same bundle objects for each database.
    bDict = {"WFD": bundle1, "DDF": bundle2}
    for run in dbRuns:
        bundle1.setRunName(run)
        bundle2.setRunName(run)
        bgroup = mb.MetricBundleGroup(bDict, opSimDbs[run], metricDataPath,
                                      resultDbs[run])
        bgroup.runAll()
def eastWestBias(colmap=None, runName='opsim', extraSql=None, extraMetadata=None):
    """Plot the number of observations to the east vs to the west, per night.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'night<365')
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").  Default is None.

    Returns
    -------
    tuple
        (bundle dict from mb.makeBundlesDictFromList, list of plot bundles —
        the latter is always empty here).
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    plotBundles = []

    group = 'East vs West'
    subgroup = 'All visits'
    if extraMetadata is not None:
        subgroup = extraMetadata

    displayDict = {'group': group, 'subgroup': subgroup, 'order': 0}

    # East/west boundary in azimuth: 180 deg, converted to radians if the
    # database columns are not in degrees.
    eastvswest = 180
    if not colmap['raDecDeg']:
        eastvswest = np.radians(eastvswest)

    # "East" = visits with azimuth <= 180.
    displayDict['caption'] = 'Number of visits per night that occur with azimuth <= 180.'
    if extraSql is not None:
        displayDict['caption'] += ' With additional sql constraint %s.' % extraSql
    metric = metrics.CountMetric(colmap['night'], metricName='Nvisits East')
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
    sql = '%s <= %f' % (colmap['az'], eastvswest)
    if extraSql is not None:
        sql = '(%s) and (%s)' % (sql, extraSql)
    plotDict = {'color': 'orange', 'label': 'East'}
    # NOTE(review): both bundles receive the SAME displayDict object, which is
    # mutated again below before the second bundle is built — correct only if
    # MetricBundle copies the dict on construction; confirm against MAF.
    bundle = mb.MetricBundle(metric, slicer, sql, metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    # "West" = visits with azimuth > 180.
    displayDict['caption'] = 'Number of visits per night that occur with azimuth > 180.'
    if extraSql is not None:
        displayDict['caption'] += ' With additional sql constraint %s.' % extraSql
    metric = metrics.CountMetric(colmap['night'], metricName='Nvisits West')
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
    sql = '%s > %f' % (colmap['az'], eastvswest)
    if extraSql is not None:
        sql = '(%s) and (%s)' % (sql, extraSql)
    plotDict = {'color': 'blue', 'label': 'West'}
    bundle = mb.MetricBundle(metric, slicer, sql, metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList), plotBundles
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict, opt
        Column-name mapping; defaults to ColMapDict('fbs').
    runName : str, opt
        Name of the simulated survey; set on every returned bundle.
    extraSql : str, opt
        Additional sql constraint joined ('and') into the per-section constraints.
    extraMetadata : str, opt
        Additional metadata string passed through to the bundles.
    nside : int, opt
        Healpix nside for most slicers (the TDE slicer uses nside=32,
        the DDF depth sampling uses nside=512).
    benchmarkArea : int, opt
        Area (sq deg) passed to fOBatch.
    benchmarkNvisits : int, opt
        Benchmark visit count passed to fOBatch.
    DDF : bool, opt
        If True, also build the deep-drilling-field depth/transient bundles.
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc, TDEsAsciiMetric

    if colmap is None:
        colmap = ColMapDict('fbs')

    # Normalize extraSql so it can be joined with ' and ' below.
    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []
    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(
        all=False, extraSql=extraSql, extraMetadata=extraMetadata)
    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(
            load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                  extraMetadata=extraMetadata, benchmarkArea=benchmarkArea,
                  benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace(
            'SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    # Per-filter year-coverage maps, summarized over the best 18k sq deg.
    displayDict = {
        'group': 'SRD',
        'subgroup': 'Year Coverage',
        'order': 0,
        'caption': 'Number of years with observations.'
    }
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [
            metrics.AreaSummaryMetric(area=18000,
                                      reduce_func=np.mean,
                                      decreasing=True,
                                      metricName='N Seasons (18k) %s' % f)
        ]
        bundleList.append(
            mb.MetricBundle(metric,
                            slicer,
                            filtersqls[f],
                            plotDict=plotDict,
                            metadata=filtermetadata[f],
                            displayDict=displayDict,
                            summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Cosmology
    #########################

    displayDict = {
        'group': 'Cosmology',
        'subgroup': 'Galaxy Counts',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i',
                                         redshiftBin='all',
                                         nside=nside)
    summary = [
        metrics.AreaSummaryMetric(area=18000,
                                  reduce_func=np.sum,
                                  decreasing=True,
                                  metricName='N Galaxies (18k)')
    ]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             plotDict=plotDict,
                             metadata=metadata,
                             displayDict=displayDict,
                             summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = extraSql
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'],
                             seed=42,
                             badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict['subgroup'] = 'Camera Rotator'
    displayDict[
        'caption'] = 'Kuiper statistic (0 is uniform, 1 is delta function) of the '
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            # NOTE(review): '+=' keeps appending to the shared caption on every
            # iteration, so later bundles accumulate all earlier captions —
            # looks like it should rebuild from the base caption each pass.
            displayDict['caption'] += f"{m.colname} for visits in {f} band."
            bundleList.append(
                mb.MetricBundle(m,
                                slicer,
                                filtersqls[f],
                                plotDict=plotDict,
                                displayDict=displayDict,
                                summaryMetrics=standardStats,
                                plotFuncs=subsetPlots))

    # XXX--need some sort of metric for weak lensing

    #########################
    # Variables and Transients
    #########################
    displayDict = {
        'group': 'Variables/Transients',
        'subgroup': 'Periodic Stars',
        'order': 0,
        'caption': None
    }
    # Grid of (period, magnitude) combinations; each bundle tests three
    # amplitudes at once.
    for period in [
            0.5,
            1,
            2,
    ]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)

            plotDict = {
                'nTicks': 3,
                'colorMin': 0,
                'colorMax': 3,
                'xMin': 0,
                'xMax': 3
            }
            metadata = combineMetadata(
                'P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods,
                                                  starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric,
                                     healslicer,
                                     sql,
                                     metadata=metadata,
                                     displayDict=displayDict,
                                     plotDict=plotDict,
                                     plotFuncs=subsetPlots,
                                     summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilovnova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             extraSql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict[
        'caption'] = 'Fraction of TDE lightcurves that could be identified, outside of DD fields'
    detectSNR = {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5}

    # light curve parameters
    epochStart = -22
    peakEpoch = 0
    nearPeakT = 10
    postPeakT = 14  # two weeks
    nPhaseCheck = 1

    # condition parameters
    nObsTotal = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nObsPrePeak = 1
    nObsNearPeak = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nFiltersNearPeak = 3
    nObsPostPeak = 0
    nFiltersPostPeak = 2

    metric = TDEsAsciiMetric(asciifile=None,
                             detectSNR=detectSNR,
                             epochStart=epochStart,
                             peakEpoch=peakEpoch,
                             nearPeakT=nearPeakT,
                             postPeakT=postPeakT,
                             nPhaseCheck=nPhaseCheck,
                             nObsTotal=nObsTotal,
                             nObsPrePeak=nObsPrePeak,
                             nObsNearPeak=nObsNearPeak,
                             nFiltersNearPeak=nFiltersNearPeak,
                             nObsPostPeak=nObsPostPeak,
                             nFiltersPostPeak=nFiltersPostPeak)
    slicer = slicers.HealpixSlicer(nside=32)
    sql = extraSql + joiner + "note not like '%DD%'"
    md = extraMetadata
    # NOTE(review): the two branches differ (leading space vs none, and the
    # non-None branch produces e.g. 'WFDNonDD' with no separator) — confirm
    # the intended metadata string.
    if md is None:
        md = " NonDD"
    else:
        md += 'NonDD'
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=standardStats,
                             plotFuncs=plotFuncs,
                             metadata=md,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################

    displayDict = {'group': 'Milky Way', 'subgroup': ''}

    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars')]
    for f in filterlist:
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \
                                 'of less than 0.1 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.1,
                                      filtername='r',
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric,
                                 slicer,
                                 filtersqls[f],
                                 runName=runName,
                                 summaryMetrics=sum_stats,
                                 plotFuncs=subsetPlots,
                                 plotDict=plotDict,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(
            Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(
            Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                # Select the high-res healpixels within ddf_radius of this DDF
                # center (survey.ra/dec appear to be radians — converted via
                # np.degrees here).
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp],
                                                  dec=dec[goodhp],
                                                  useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(
                        metricName=survey.survey_name + ', ' + f)
                    summary = [
                        metrics.MedianMetric(metricName='Median depth ' +
                                             survey.survey_name + ', ' + f)
                    ]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             metadata=filtermetadata[f],
                                             displayDict=displayDict,
                                             summaryMetrics=summary,
                                             plotFuncs=[],
                                             plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(
                    plcs=plasticc_models_dict['SNIa-normal'],
                    seed=42,
                    ra_cen=survey.ra,
                    dec_cen=survey.dec,
                    radius=np.radians(3.),
                    useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name +
                                         ' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric,
                                         slicer,
                                         sql,
                                         runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs,
                                         metadata=extraMetadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)

    return bundleDict