def testProperMotionMetric(self):
    """
    Test the ProperMotion metric.
    """
    names = ['expMJD', 'finSeeing', 'fiveSigmaDepth',
             'fieldRA', 'fieldDec', 'filter']
    types = [float, float, float, float, float, '<U1']
    data = np.zeros(700, dtype=list(zip(names, types)))
    slicePoint = [0]
    stacker = stackers.ParallaxFactorStacker()
    normFlags = [False, True]
    data['expMJD'] = np.arange(700) + 56762
    data['finSeeing'] = 0.7
    data['filter'][0:100] = str('r')
    data['filter'][100:200] = str('u')
    data['filter'][200:] = str('g')
    data['fiveSigmaDepth'] = 24.
    data = stacker.run(data)
    for flag in normFlags:
        data['finSeeing'] = 0.7
        data['fiveSigmaDepth'] = 24
        baseline = metrics.ProperMotionMetric(normalize=flag,
                                              seeingCol='finSeeing').run(data, slicePoint)
        data['finSeeing'] = data['finSeeing'] + .3
        worse1 = metrics.ProperMotionMetric(normalize=flag,
                                            seeingCol='finSeeing').run(data, slicePoint)
        worse2 = metrics.ProperMotionMetric(normalize=flag, rmag=22.,
                                            seeingCol='finSeeing').run(data, slicePoint)
        worse3 = metrics.ProperMotionMetric(normalize=flag, rmag=22.,
                                            seeingCol='finSeeing').run(data[0:300], slicePoint)
        data['fiveSigmaDepth'] = data['fiveSigmaDepth'] - 1.
        worse4 = metrics.ProperMotionMetric(normalize=flag, rmag=22.,
                                            seeingCol='finSeeing').run(data[0:300], slicePoint)
        # Make sure the RMS increases as seeing increases, the star gets fainter,
        # the background gets brighter, or the baseline decreases.
        if flag:
            # When normalized, mag of star and m5 don't matter (just scheduling).
            self.assertAlmostEqual(worse2, worse1)
            self.assertAlmostEqual(worse4, worse3)
            # But using fewer points should make proper motion worse.
            # survey assumed to have same seeing and limiting mags.
            assert (worse3 < worse2)
        else:
            assert (worse1 > baseline)
            assert (worse2 > worse1)
            assert (worse3 > worse2)
            assert (worse4 > worse3)
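# Standalone sketch (not part of the test class above) showing the same call
# pattern the test exercises: build a small synthetic visit array and evaluate
# ProperMotionMetric on it directly. This assumes the standard lsst.sims.maf
# layout for the imports and mirrors the old-style column names used by the
# test ('expMJD', 'finSeeing'); treat it as illustrative, not canonical.
import numpy as np
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.stackers as stackers

names = ['expMJD', 'finSeeing', 'fiveSigmaDepth', 'fieldRA', 'fieldDec', 'filter']
types = [float, float, float, float, float, '<U1']
visits = np.zeros(100, dtype=list(zip(names, types)))
visits['expMJD'] = np.arange(100) * 10. + 56762  # ~2.7 years of visits
visits['finSeeing'] = 0.7
visits['fiveSigmaDepth'] = 24.
visits['filter'] = 'r'
# The parallax factor stacker is not needed for proper motion itself,
# but is run here to mirror the setup used in the test above.
visits = stackers.ParallaxFactorStacker().run(visits)

metric = metrics.ProperMotionMetric(rmag=20., seeingCol='finSeeing')
print(metric.run(visits, [0]))  # expected proper motion uncertainty for an r=20 star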
def makeBundleList(dbFile, runName=None, nside=64, benchmark='design', lonCol='fieldRA', latCol='fieldDec', seeingCol='seeingFwhmGeom'): """ make a list of metricBundle objects to look at the scientific performance of an opsim run. """ # List to hold everything we're going to make bundleList = [] # List to hold metrics that shouldn't be saved noSaveBundleList = [] # Connect to the database opsimdb = db.OpsimDatabaseV4(dbFile) if runName is None: runName = os.path.basename(dbFile).replace('_sqlite.db', '') # Fetch the proposal ID values from the database propids, propTags = opsimdb.fetchPropInfo() # Fetch the telescope location from config lat, lon, height = opsimdb.fetchLatLonHeight() # Add metadata regarding dithering/non-dithered. commonname = ''.join([a for a in lonCol if a in latCol]) if commonname == 'field': slicermetadata = ' (non-dithered)' else: slicermetadata = ' (%s)' % (commonname) # Construct a WFD SQL where clause so multiple propIDs can query by WFD: wfdWhere = opsimdb.createSQLWhere('WFD', propTags) print('#FYI: WFD "where" clause: %s' % (wfdWhere)) ddWhere = opsimdb.createSQLWhere('DD', propTags) print('#FYI: DD "where" clause: %s' % (ddWhere)) # Set up benchmark values, scaled to length of opsim run. runLength = opsimdb.fetchRunLength() if benchmark == 'requested': # Fetch design values for seeing/skybrightness/single visit depth. benchmarkVals = utils.scaleBenchmarks(runLength, benchmark='design') # Update nvisits with requested visits from config files. benchmarkVals['nvisits'] = opsimdb.fetchRequestedNvisits(propId=propTags['WFD']) # Calculate expected coadded depth. benchmarkVals['coaddedDepth'] = utils.calcCoaddedDepth(benchmarkVals['nvisits'], benchmarkVals['singleVisitDepth']) elif (benchmark == 'stretch') or (benchmark == 'design'): # Calculate benchmarks for stretch or design. benchmarkVals = utils.scaleBenchmarks(runLength, benchmark=benchmark) benchmarkVals['coaddedDepth'] = utils.calcCoaddedDepth(benchmarkVals['nvisits'], benchmarkVals['singleVisitDepth']) else: raise ValueError('Could not recognize benchmark value %s, use design, stretch or requested.' % (benchmark)) # Check that nvisits is not set to zero (for very short run length). for f in benchmarkVals['nvisits']: if benchmarkVals['nvisits'][f] == 0: print('Updating benchmark nvisits value in %s to be nonzero' % (f)) benchmarkVals['nvisits'][f] = 1 # Set values for min/max range of nvisits for All/WFD and DD plots. These are somewhat arbitrary. nvisitsRange = {} nvisitsRange['all'] = {'u': [20, 80], 'g': [50, 150], 'r': [100, 250], 'i': [100, 250], 'z': [100, 300], 'y': [100, 300]} nvisitsRange['DD'] = {'u': [6000, 10000], 'g': [2500, 5000], 'r': [5000, 8000], 'i': [5000, 8000], 'z': [7000, 10000], 'y': [5000, 8000]} # Scale these ranges for the runLength. scale = runLength / 10.0 for prop in nvisitsRange: for f in nvisitsRange[prop]: for i in [0, 1]: nvisitsRange[prop][f][i] = int(np.floor(nvisitsRange[prop][f][i] * scale)) # Filter list, and map of colors (for plots) to filters. 
filters = ['u', 'g', 'r', 'i', 'z', 'y'] colors = {'u': 'cyan', 'g': 'g', 'r': 'y', 'i': 'r', 'z': 'm', 'y': 'k'} filtorder = {'u': 1, 'g': 2, 'r': 3, 'i': 4, 'z': 5, 'y': 6} # Easy way to run through all fi # Set up a list of common summary stats commonSummary = [metrics.MeanMetric(), metrics.RobustRmsMetric(), metrics.MedianMetric(), metrics.PercentileMetric(metricName='25th%ile', percentile=25), metrics.PercentileMetric(metricName='75th%ile', percentile=75), metrics.MinMetric(), metrics.MaxMetric()] allStats = commonSummary # Set up some 'group' labels reqgroup = 'A: Required SRD metrics' depthgroup = 'B: Depth per filter' uniformitygroup = 'C: Uniformity' airmassgroup = 'D: Airmass distribution' seeinggroup = 'E: Seeing distribution' transgroup = 'F: Transients' sngroup = 'G: SN Ia' altAzGroup = 'H: Alt Az' rangeGroup = 'I: Range of Dates' intergroup = 'J: Inter-Night' phaseGroup = 'K: Max Phase Gap' NEOGroup = 'L: NEO Detection' # Set up an object to track the metricBundles that we want to combine into merged plots. mergedHistDict = {} # Set the histogram merge function. mergeFunc = plots.HealpixHistogram() keys = ['NVisits', 'coaddm5', 'NormEffTime', 'Minseeing', 'seeingAboveLimit', 'minAirmass', 'fracAboveAirmass'] for key in keys: mergedHistDict[key] = plots.PlotBundle(plotFunc=mergeFunc) ## # Calculate the fO metrics for all proposals and WFD only. order = 0 for prop in ('All prop', 'WFD only'): if prop == 'All prop': metadata = 'All Visits' + slicermetadata sqlconstraint = '' if prop == 'WFD only': metadata = 'WFD only' + slicermetadata sqlconstraint = '%s' % (wfdWhere) # Configure the count metric which is what is used for f0 slicer. m1 = metrics.CountMetric(col='observationStartMJD', metricName='fO') plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkVals['Area'], 'Nvisit': benchmarkVals['nvisitsTotal'], 'xMin': 0, 'xMax': 1500} summaryMetrics = [metrics.fOArea(nside=nside, norm=False, metricName='fOArea: Nvisits (#)', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']), metrics.fOArea(nside=nside, norm=True, metricName='fOArea: Nvisits/benchmark', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']), metrics.fONv(nside=nside, norm=False, metricName='fONv: Area (sqdeg)', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']), metrics.fONv(nside=nside, norm=True, metricName='fONv: Area/benchmark', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal'])] caption = 'The FO metric evaluates the overall efficiency of observing. ' caption += ('fOArea: Nvisits = %.1f sq degrees receive at least this many visits out of %d. ' % (benchmarkVals['Area'], benchmarkVals['nvisitsTotal'])) caption += ('fONv: Area = this many square degrees out of %.1f receive at least %d visits.' % (benchmarkVals['Area'], benchmarkVals['nvisitsTotal'])) displayDict = {'group': reqgroup, 'subgroup': 'F0', 'displayOrder': order, 'caption': caption} order += 1 slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryMetrics, plotFuncs=[plots.FOPlot()], runName=runName, metadata=metadata) bundleList.append(bundle) ### # Calculate the Rapid Revisit Metrics. order = 0 metadata = 'All Visits' + slicermetadata sqlconstraint = '' dTmin = 40.0 # seconds dTmax = 30.0*60. 
# seconds minNvisit = 100 pixArea = float(hp.nside2pixarea(nside, degrees=True)) scale = pixArea * hp.nside2npix(nside) cutoff1 = 0.15 extraStats1 = [metrics.FracBelowMetric(cutoff=cutoff1, scale=scale, metricName='Area (sq deg)')] extraStats1.extend(commonSummary) slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) m1 = metrics.RapidRevisitMetric(metricName='RapidRevisitUniformity', dTmin=dTmin / 60.0 / 60.0 / 24.0, dTmax=dTmax / 60.0 / 60.0 / 24.0, minNvisits=minNvisit) plotDict = {'xMin': 0, 'xMax': 1} summaryStats = extraStats1 caption = 'Deviation from uniformity for short revisit timescales, between %s and %s seconds, ' % ( dTmin, dTmax) caption += 'for pointings with at least %d visits in this time range. ' % (minNvisit) caption += 'Summary statistic "Area" below indicates the area on the sky which has a ' caption += 'deviation from uniformity of < %.2f.' % (cutoff1) displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order, 'caption': caption} bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 dTmax = dTmax/60.0 # need time in minutes for Nrevisits metric m2 = metrics.NRevisitsMetric(dT=dTmax) plotDict = {'xMin': 0.1, 'xMax': 2000, 'logScale': True} cutoff2 = 800 extraStats2 = [metrics.FracAboveMetric(cutoff=cutoff2, scale=scale, metricName='Area (sq deg)')] extraStats2.extend(commonSummary) caption = 'Number of consecutive visits with return times faster than %.1f minutes, ' % (dTmax) caption += 'in any filter, all proposals. ' caption += 'Summary statistic "Area" below indicates the area on the sky which has more than ' caption += '%d revisits within this time window.' % (cutoff2) summaryStats = extraStats2 displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order, 'caption': caption} bundle = metricBundles.MetricBundle(m2, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 m3 = metrics.NRevisitsMetric(dT=dTmax, normed=True) plotDict = {'xMin': 0, 'xMax': 1, 'cbarFormat': '%.1f'} cutoff3 = 0.6 extraStats3 = [metrics.FracAboveMetric(cutoff=cutoff3, scale=scale, metricName='Area (sq deg)')] extraStats3.extend(commonSummary) summaryStats = extraStats3 caption = 'Fraction of total visits where consecutive visits have return times faster ' caption += 'than %.1f minutes, in any filter, all proposals. ' % (dTmax) caption += 'Summary statistic "Area" below indicates the area on the sky which has more ' caption += 'than %d revisits within this time window.' % (cutoff3) displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order, 'caption': caption} bundle = metricBundles.MetricBundle(m3, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 # And add a histogram of the time between quick revisits. binMin = 0 binMax = 120. binsize = 3. bins_metric = np.arange(binMin / 60.0 / 24.0, (binMax + binsize) / 60. / 24., binsize / 60. / 24.) bins_plot = bins_metric * 24.0 * 60.0 m1 = metrics.TgapsMetric(bins=bins_metric, metricName='dT visits') plotDict = {'bins': bins_plot, 'xlabel': 'dT (minutes)'} caption = ('Histogram of the time between consecutive revisits (<%.1f minutes), over entire sky.' 
% (binMax)) displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'order': order, 'caption': caption} slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) plotFunc = plots.SummaryHistogram() bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, plotFuncs=[plotFunc]) bundleList.append(bundle) order += 1 ## # Trigonometric parallax and proper motion @ r=20 and r=24 slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) sqlconstraint = '' order = 0 metric = metrics.ParallaxMetric(metricName='Parallax 20', rmag=20, seeingCol=seeingCol) summaryStats = allStats plotDict = {'cbarFormat': '%.1f', 'xMin': 0, 'xMax': 3} displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': 'Parallax precision at r=20. (without refraction).'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxMetric(metricName='Parallax 24', rmag=24, seeingCol=seeingCol) plotDict = {'cbarFormat': '%.1f', 'xMin': 0, 'xMax': 10} displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': 'Parallax precision at r=24. (without refraction).'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxMetric(metricName='Parallax Normed', rmag=24, normalize=True, seeingCol=seeingCol) plotDict = {'xMin': 0.5, 'xMax': 1.0} displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': 'Normalized parallax (normalized to optimum observation cadence, 1=optimal).'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage 20', rmag=20, seeingCol=seeingCol) plotDict = {} caption = "Parallax factor coverage for an r=20 star (0 is bad, 0.5-1 is good). " caption += "One expects the parallax factor coverage to vary because stars on the ecliptic " caption += "can be observed when they have no parallax offset while stars at the pole are always " caption += "offset by the full parallax offset.""" displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage 24', rmag=24, seeingCol=seeingCol) plotDict = {} caption = "Parallax factor coverage for an r=24 star (0 is bad, 0.5-1 is good). 
" caption += "One expects the parallax factor coverage to vary because stars on the ecliptic " caption += "can be observed when they have no parallax offset while stars at the pole are always " caption += "offset by the full parallax offset.""" displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy 20', rmag=20, seeingCol=seeingCol) plotDict = {} caption = 'Correlation between parallax offset magnitude and hour angle an r=20 star.' caption += ' (0 is good, near -1 or 1 is bad).' displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy 24', rmag=24, seeingCol=seeingCol) plotDict = {} caption = 'Correlation between parallax offset magnitude and hour angle an r=24 star.' caption += ' (0 is good, near -1 or 1 is bad).' displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ProperMotionMetric(metricName='Proper Motion 20', rmag=20, seeingCol=seeingCol) summaryStats = allStats plotDict = {'xMin': 0, 'xMax': 3} displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order, 'caption': 'Proper Motion precision at r=20.'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ProperMotionMetric(rmag=24, metricName='Proper Motion 24', seeingCol=seeingCol) summaryStats = allStats plotDict = {'xMin': 0, 'xMax': 10} displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order, 'caption': 'Proper Motion precision at r=24.'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ProperMotionMetric(rmag=24, normalize=True, metricName='Proper Motion Normed', seeingCol=seeingCol) plotDict = {'xMin': 0.2, 'xMax': 0.7} caption = 'Normalized proper motion at r=24. ' caption += '(normalized to optimum observation cadence - start/end. 1=optimal).' displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 ## # Calculate the time uniformity in each filter, for each year. 
order = 0 slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) plotFuncs = [plots.TwoDMap()] step = 0.5 bins = np.arange(0, 365.25 * 10 + 40, 40) - step metric = metrics.AccumulateUniformityMetric(bins=bins) plotDict = {'xlabel': 'Night (days)', 'xextent': [bins.min( ) + step, bins.max() + step], 'cbarTitle': 'Uniformity'} for f in filters: sqlconstraint = 'filter = "%s"' % (f) caption = 'Deviation from uniformity in %s band. ' % f caption += 'Northern Healpixels are at the top of the image.' caption += '(0=perfectly uniform, 1=perfectly nonuniform).' displayDict = {'group': uniformitygroup, 'subgroup': 'per night', 'order': filtorder[f], 'caption': caption} metadata = '%s band' % (f) + slicermetadata bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, plotFuncs=plotFuncs) noSaveBundleList.append(bundle) ## # Depth metrics. slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) for f in filters: propCaption = '%s band, all proposals %s' % (f, slicermetadata) sqlconstraint = 'filter = "%s"' % (f) metadata = '%s band' % (f) + slicermetadata # Number of visits. metric = metrics.CountMetric(col='observationStartMJD', metricName='NVisits') plotDict = {'xlabel': 'Number of visits', 'xMin': nvisitsRange['all'][f][0], 'xMax': nvisitsRange['all'][f][1], 'colorMin': nvisitsRange['all'][f][0], 'colorMax': nvisitsRange['all'][f][1], 'binsize': 5, 'logScale': True, 'nTicks': 4, 'colorMin': 1} summaryStats = allStats displayDict = {'group': depthgroup, 'subgroup': 'Nvisits', 'order': filtorder[f], 'caption': 'Number of visits in filter %s, %s.' % (f, propCaption)} histMerge = {'color': colors[f], 'label': '%s' % (f), 'binsize': 5, 'xMin': nvisitsRange['all'][f][0], 'xMax': nvisitsRange['all'][f][1], 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['NVisits'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # Coadded depth. metric = metrics.Coaddm5Metric() plotDict = {'zp': benchmarkVals['coaddedDepth'][f], 'xMin': -0.8, 'xMax': 0.8, 'xlabel': 'coadded m5 - %.1f' % benchmarkVals['coaddedDepth'][f]} summaryStats = allStats histMerge = {'legendloc': 'upper right', 'color': colors[f], 'label': '%s' % f, 'binsize': .02, 'xlabel': 'coadded m5 - benchmark value'} caption = ('Coadded depth in filter %s, with %s value subtracted (%.1f), %s. ' % (f, benchmark, benchmarkVals['coaddedDepth'][f], propCaption)) caption += 'More positive numbers indicate fainter limiting magnitudes.' displayDict = {'group': depthgroup, 'subgroup': 'Coadded Depth', 'order': filtorder[f], 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['coaddm5'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # Effective time. metric = metrics.TeffMetric(metricName='Normalized Effective Time', normed=True, fiducialDepth=benchmarkVals['singleVisitDepth']) plotDict = {'xMin': 0.1, 'xMax': 1.1} summaryStats = allStats histMerge = {'legendLoc': 'upper right', 'color': colors[f], 'label': '%s' % f, 'binsize': 0.02} caption = ('"Time Effective" in filter %s, calculated with fiducial single-visit depth of %s mag. 
' % (f, benchmarkVals['singleVisitDepth'][f])) caption += 'Normalized by the fiducial time effective, if every observation was at ' caption += 'the fiducial depth.' displayDict = {'group': depthgroup, 'subgroup': 'Time Eff.', 'order': filtorder[f], 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['NormEffTime'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # Put in a z=0.5 Type Ia SN, based on Cambridge 2015 workshop notebook. # Check for 1) detection in any band, 2) detection on the rise in any band, # 3) good characterization peaks = {'uPeak': 25.9, 'gPeak': 23.6, 'rPeak': 22.6, 'iPeak': 22.7, 'zPeak': 22.7, 'yPeak': 22.8} peakTime = 15. transDuration = peakTime + 30. # Days metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNDetection', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected in any filter' displayDict = {'group': transgroup, 'subgroup': 'Detected', 'caption': caption} sqlconstraint = '' metadata = '' + slicermetadata plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, nPrePeak=1, metricName='SNAlert', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected pre-peak in any filter' displayDict = {'group': transgroup, 'subgroup': 'Detected on the rise', 'caption': caption} plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30., transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNLots', nFilters=3, nPrePeak=3, nPerLC=2, **peaks) caption = 'Fraction of z=0.5 type Ia SN that are observed 6 times, 3 pre-peak, ' caption += '3 post-peak, with observations in 3 filters' displayDict = {'group': transgroup, 'subgroup': 'Well observed', 'caption': caption} sqlconstraint = 'filter="r" or filter="g" or filter="i" or filter="z" ' plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata) bundleList.append(bundle) # Good seeing in r/i band metrics, including in first/second years. order = 0 for tcolor, tlabel, timespan in zip(['k', 'g', 'r'], ['10 years', '1 year', '2 years'], ['', ' and night<=365', ' and night<=730']): order += 1 for f in (['r', 'i']): sqlconstraint = 'filter = "%s" %s' % (f, timespan) propCaption = '%s band, all proposals %s, over %s.' % (f, slicermetadata, tlabel) metadata = '%s band, %s' % (f, tlabel) + slicermetadata seeing_limit = 0.7 airmass_limit = 1.2 metric = metrics.MinMetric(col=seeingCol) summaryStats = allStats plotDict = {'xMin': 0.35, 'xMax': 1.5, 'color': tcolor} displayDict = {'group': seeinggroup, 'subgroup': 'Best Seeing', 'order': filtorder[f] * 100 + order, 'caption': 'Minimum FWHMgeom values in %s.' 
% (propCaption)} histMerge = {'label': '%s %s' % (f, tlabel), 'color': tcolor, 'binsize': 0.03, 'xMin': 0.35, 'xMax': 1.5, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['Minseeing'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) metric = metrics.FracAboveMetric(col=seeingCol, cutoff=seeing_limit) summaryStats = allStats plotDict = {'xMin': 0, 'xMax': 1.1, 'color': tcolor} displayDict = {'group': seeinggroup, 'subgroup': 'Good seeing fraction', 'order': filtorder[f] * 100 + order, 'caption': 'Fraction of total images with FWHMgeom worse than %.1f, in %s' % (seeing_limit, propCaption)} histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel), 'binsize': 0.05, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['seeingAboveLimit'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) metric = metrics.MinMetric(col='airmass') plotDict = {'xMin': 1, 'xMax': 1.5, 'color': tcolor} summaryStats = allStats displayDict = {'group': airmassgroup, 'subgroup': 'Best Airmass', 'order': filtorder[f] * 100 + order, 'caption': 'Minimum airmass in %s.' % (propCaption)} histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel), 'binsize': 0.03, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['minAirmass'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) metric = metrics.FracAboveMetric(col='airmass', cutoff=airmass_limit) plotDict = {'xMin': 0, 'xMax': 1, 'color': tcolor} summaryStats = allStats displayDict = {'group': airmassgroup, 'subgroup': 'Low airmass fraction', 'order': filtorder[f] * 100 + order, 'caption': 'Fraction of total images with airmass higher than %.2f, in %s' % (airmass_limit, propCaption)} histMerge = {'color': tcolor, 'label': '%s %s' % ( f, tlabel), 'binsize': 0.05, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['fracAboveAirmass'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # SNe metrics from UK workshop. peaks = {'uPeak': 25.9, 'gPeak': 23.6, 'rPeak': 22.6, 'iPeak': 22.7, 'zPeak': 22.7, 'yPeak': 22.8} peakTime = 15. transDuration = peakTime + 30. # Days metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNDetection', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected at any point in their light curve in any filter' displayDict = {'group': sngroup, 'subgroup': 'Detected', 'caption': caption} sqlconstraint = '' plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. 
/ peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, nPrePeak=1, metricName='SNAlert', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected pre-peak in any filter' displayDict = {'group': sngroup, 'subgroup': 'Detected on the rise', 'caption': caption} plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30., transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNLots', nFilters=3, nPrePeak=3, nPerLC=2, **peaks) caption = 'Fraction of z=0.5 type Ia SN that are observed 6 times, 3 pre-peak, ' caption += '3 post-peak, with observations in 3 filters' displayDict = {'group': sngroup, 'subgroup': 'Well observed', 'caption': caption} sqlconstraint = 'filter="r" or filter="g" or filter="i" or filter="z" ' plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) propIDOrderDict = {} orderVal = 100 for propID in propids: propIDOrderDict[propID] = orderVal orderVal += 100 # Full range of dates: metric = metrics.FullRangeMetric(col='observationStartMJD') plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] caption = 'Time span of survey.' sqlconstraint = '' plotDict = {} displayDict = {'group': rangeGroup, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) for f in filters: for propid in propids: displayDict = {'group': rangeGroup, 'subgroup': propids[propid], 'caption': caption, 'order': filtorder[f]} md = '%s, %s' % (f, propids[propid]) sql = 'filter="%s" and proposalId=%i' % (f, propid) bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict, metadata=md, plotFuncs=plotFuncs, displayDict=displayDict, runName=runName) bundleList.append(bundle) # Alt az plots slicer = slicers.HealpixSlicer(nside=64, latCol='zenithDistance', lonCol='azimuth', useCache=False) metric = metrics.CountMetric('observationStartMJD', metricName='Nvisits as function of Alt/Az') plotDict = {} plotFuncs = [plots.LambertSkyMap()] displayDict = {'group': altAzGroup, 'caption': 'Alt Az pointing distribution'} for f in filters: for propid in propids: displayDict = {'group': altAzGroup, 'subgroup': propids[propid], 'caption': 'Alt Az pointing distribution', 'order': filtorder[f]} md = '%s, %s' % (f, propids[propid]) sql = 'filter="%s" and proposalId=%i' % (f, propid) bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict, plotFuncs=plotFuncs, metadata=md, displayDict=displayDict, runName=runName) bundleList.append(bundle) sql = '' md = 'all observations' displayDict = {'group': altAzGroup, 'subgroup': 'All Observations', 'caption': 'Alt Az pointing distribution'} bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict, plotFuncs=plotFuncs, metadata=md, displayDict=displayDict, runName=runName) bundleList.append(bundle) # Median inter-night gap (each and all filters) slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) metric = metrics.InterNightGapsMetric(metricName='Median Inter-Night Gap') sqls = ['filter = "%s"' % f for f in filters] orders = [filtorder[f] for f in filters] orders.append(0) sqls.append('') for sql, order 
in zip(sqls, orders): displayDict = {'group': intergroup, 'subgroup': 'Median Gap', 'caption': 'Median gap between days', 'order': order} bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, runName=runName) bundleList.append(bundle) # Max inter-night gap in r and all bands dslicer = slicers.HealpixSlicer(nside=nside, lonCol='ditheredRA', latCol='ditheredDec') metric = metrics.InterNightGapsMetric(metricName='Max Inter-Night Gap', reduceFunc=np.max) plotDict = {'percentileClip': 95.} for sql, order in zip(sqls, orders): displayDict = {'group': intergroup, 'subgroup': 'Max Gap', 'caption': 'Max gap between nights', 'order': order} bundle = metricBundles.MetricBundle(metric, dslicer, sql, displayDict=displayDict, plotDict=plotDict, runName=runName) bundleList.append(bundle) # largest phase gap for periods periods = [0.1, 1.0, 10., 100.] sqls = {'u': 'filter = "u"', 'r': 'filter="r"', 'g,r,i,z': 'filter="g" or filter="r" or filter="i" or filter="z"', 'all': ''} for sql in sqls: for period in periods: displayDict = {'group': phaseGroup, 'subgroup': 'period=%.2f days, filter=%s' % (period, sql), 'caption': 'Maximum phase gaps'} metric = metrics.PhaseGapMetric(nPeriods=1, periodMin=period, periodMax=period, metricName='PhaseGap, %.1f' % period) bundle = metricBundles.MetricBundle(metric, slicer, sqls[sql], displayDict=displayDict, runName=runName) bundleList.append(bundle) # NEO XY plots slicer = slicers.UniSlicer() metric = metrics.PassMetric(metricName='NEODistances') stacker = stackers.NEODistStacker() stacker2 = stackers.EclipticStacker() for f in filters: plotFunc = plots.NeoDistancePlotter(eclipMax=10., eclipMin=-10.) caption = 'Observations within 10 degrees of the ecliptic. Distance an H=22 NEO would be detected' displayDict = {'group': NEOGroup, 'subgroup': 'xy', 'order': filtorder[f], 'caption': caption} plotDict = {} sqlconstraint = 'filter = "%s"' % (f) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, displayDict=displayDict, stackerList=[stacker, stacker2], plotDict=plotDict, plotFuncs=[plotFunc]) noSaveBundleList.append(bundle) # Solar elongation sqls = ['filter = "%s"' % f for f in filters] orders = [filtorder[f] for f in filters] sqls.append('') orders.append(0) for sql, order in zip(sqls, orders): plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': NEOGroup, 'subgroup': 'Solar Elongation', 'caption': 'Median solar elongation in degrees', 'order': order} metric = metrics.MedianMetric('solarElong') slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, plotFuncs=plotFuncs) bundleList.append(bundle) plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': NEOGroup, 'subgroup': 'Solar Elongation', 'caption': 'Minimum solar elongation in degrees', 'order': order} metric = metrics.MinMetric('solarElong') slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, plotFuncs=plotFuncs) bundleList.append(bundle) return (metricBundles.makeBundlesDictFromList(bundleList), mergedHistDict, metricBundles.makeBundlesDictFromList(noSaveBundleList))
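# Illustrative driver for makeBundleList, sketched from the MetricBundleGroup /
# runAll / plotAll calls that appear elsewhere in this file. The database file
# name and output directory are placeholders, and the import paths assume the
# standard lsst.sims.maf layout used throughout.
import lsst.sims.maf.db as db
import lsst.sims.maf.metricBundles as metricBundles

dbFile = 'my_run_sqlite.db'          # placeholder opsim sqlite file
outDir = 'science_performance_out'   # placeholder output directory

bundleDict, mergedHistDict, noSaveBundleDict = makeBundleList(dbFile, nside=64)
opsdb = db.OpsimDatabaseV4(dbFile)
resultsDb = db.ResultsDb(outDir=outDir)
bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
bgroup.runAll()
bgroup.plotAll()
# mergedHistDict holds the plots.PlotBundle objects that collect the per-filter
# histograms (NVisits, coaddm5, ...) for merged plotting.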
def go(nside=64, rmag=21., SedTemplate='flat', DoRun=False, LFilters = [], \ LNightMax=[], nightMax=1e4, \ CustomPlotLimits=True, \ RunOne=False, MaxRuns=1e3, \ SpatialClip=95., \ seeingCol='FWHMeff', \ sCmap='cubehelix_r', \ checkCorrKind=False, \ wfdPlane=True, \ useGRIZ=False): # Go to the directory where the sqlite databases are held... # cd /Users/clarkson/Data/LSST/OpSimRuns/opsim20160411 # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits # Break the specifications across lines to make subdivision easier # Subsets by time first, then by filter, finally the whole shebang # 2016-04-23 - replaced enigma_1189 --> minion_1016 # 2016-04-23 - replaced ops2_1092 --> minion_1020 # (Yes the inversion of the first two is deliberate.) runNames = ['minion_1016', 'minion_1020', 'minion_1020', 'minion_1016', \ 'minion_1020', 'minion_1016', 'minion_1020', 'minion_1016', \ 'minion_1020', 'minion_1016'] LFilters = ['', '', '', '', \ 'u', 'u', 'y', 'y', \ '', ''] LNightMax = [365, 365, 730, 730, \ 1e4, 1e4, 1e4, 1e4, \ 1e4, 1e4] # WIC try again, this time on the new astro_lsst_01_1004 only if wfdPlane: LFilters = ['', '', '', 'u', 'y'] LNightMax = [365, 730, 1e4, 1e4, 1e4] runNames = ['astro_lsst_01_1004' for i in range (len(LFilters)) ] # WIC 2016-05-01 check correlation if checkCorrKind: LFilters = ['', ''] LNightMax = [365, 365] runNames = ['minion_1016', 'minion_1016'] # Type of correlation used for HA Degen # checkCorrKind = True useSpearmanR = [False, True] if useGRIZ: runNames=['minion_1016','astro_lsst_01_1004', 'minion_1020'] LFilters = ['griz' for iRun in range(len(runNames)) ] #LNightMax = [1e4 for iRun in range(len(runNames)) ] #LNightMax = [730 for iRun in range(len(runNames)) ] LNightMax = [365 for iRun in range(len(runNames)) ] # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good LUpperParallax = [] LUpperPropmotion = [] if CustomPlotLimits: LUpperParallax = [10, 10, 10, 10, \ 10, 10, 40, 40, \ 3.0, 3.0 ] # For proper motion, it's a little tricky to say because the # regular case is so pathological for the field. Try the following: LUpperPropmotion = [40, 40, 5, 20, \ 3.5, 20, 3.5, 20, \ 0.5, 5] if len(runNames) < 2: LUpperPropmotion = [100 for i in range(len(runNames))] print "runAstrom.go INFO - will run the following:" for iSho in range(len(runNames)): sFilThis = '' # print iSho, len(LFilters) if iSho <= len(LFilters): sFilThis = sqlFromFilterString(LFilters[iSho]) print "%i: %-12s, %1s, %i, sqlFilter -- %s" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho], sFilThis) print "===========================" print "mag max = %.2f" % (rmag) print "---------------------------" # print runNames # if not DoRun: # print "Set DoRun=True to actually run this." # print len(LFilters), len(runNames), len(LFilters) == len(runNames) # return #'kraken_1038', 'kraken_1034', 'ops2_1098'] # nside = 64 slicer = slicers.HealpixSlicer(nside=nside) # Make it so we don't bother with the silly power spectra plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # WIC - back up the plotting arguments with a default value plotFuncsPristine = copy.deepcopy(plotFuncs) # WIC - the only way this will make sense to me is if I make a # dictionary of plot arguments. Let's try it... 
DPlotArgs = {} for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']: DPlotArgs[plotArg] = copy.deepcopy(plotFuncs) if CustomPlotLimits: # Use the same color map for all the metrics for plotMetric in DPlotArgs.keys(): DPlotArgs[plotMetric][0].defaultPlotDict['cmap'] = sCmap # Apply spatial clipping for all but the HADegen, for which we # have other limits... for plotMetric in ['parallax', 'propmotion', 'coverage']: DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip # Some limits common to spatial maps and histograms for iPl in range(0,2): DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True # NOT a loop because we might want to separate out the behavior # Standardized range for the histograms for new parallax metrics DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0. DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1. DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1. DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] = 1. # Standardize the sky map for the HAdegen as well. DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0. DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1. DPlotArgs['HAdegen'][0].defaultPlotDict['xMin'] = -1. DPlotArgs['HAdegen'][0].defaultPlotDict['xMax'] = 1. # Standardize at least the lower bound of the histogram in # both the proper motion and parallax errors. Upper limit we # can customize with a loop. DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2 # should not be zero if log scale!! DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0. # WIC - try changing the plot dictionary if not DoRun: plotFuncs[0].defaultPlotDict['logScale'] = True print DPlotArgs['propmotion'][0].defaultPlotDict print DPlotArgs['propmotion'][1].defaultPlotDict return # The old runs have the seeing in finSeeing #seeingCol = 'finSeeing' ### UPDATE THE SEEING COLUMN #seeingCol = 'FWHMeff' ## Moved up to a command-line argument # Use all the observations. Can change if you want a different # time span # sqlconstraint = '' # list of sqlconstraints now used, which gets handled within the loop. # run some summary stats on everything summaryMetrics = [metrics.MedianMetric()] tStart = time.time() # Running one, or the whole lot? RunMax = len(runNames) # allow user to set a different number (say, 2) if MaxRuns < RunMax and MaxRuns > 0: RunMax = int(MaxRuns) # the following keyword overrides if RunOne: RunMax = 1 print "Starting runs. RunMax = %i" % (RunMax) for iRun in range(RunMax): run = runNames[iRun][:] # for run in runNames: # Open the OpSim database timeStartIteration = time.time() # Some syntax added to test for existence of the database dbFil = run+'_sqlite.db' if not os.access(dbFil, os.R_OK): print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil) print "runAstrom.go FATAL - skipping run %s" % (run) continue else: deltaT = time.time()-tStart print "runAstrom.go INFO - ##################################" print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \ % (run, nside, deltaT/60.) opsdb = db.OpsimDatabase(run+'_sqlite.db') # Set SQL constraint appropriate for each filter in the # list. If we supplied a list of filters, use it for sqlconstraint = '' ThisFilter = 'ugrizy' if len(LFilters) == len(runNames): # Only change the filter if one was actually supplied! 
if len(LFilters[iRun]) > 0: ThisFilter = LFilters[iRun] sqlconstraint = sqlFromFilterString(ThisFilter) ### sqlconstraint = 'filter = "%s"' % (ThisFilter) # If nightmax was supplied, use it ThisNightMax = int(nightMax) # copy not view if len(LNightMax) == len(runNames): # Only update nightmax if one was given try: ThisNightMax = int(LNightMax[iRun]) # This might be redundant with the fmt statement below. if len(sqlconstraint) < 1: sqlconstraint = 'night < %i' % (ThisNightMax) else: sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax) except: print "runAstrom.go WARN - run %i problem with NightMax" % (iRun) dumdum = 1. # Set where the output should go - include the filter!! sMag = '%.1f' % (rmag) sMag = sMag.replace(".","p") outDir = './metricEvals/%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag) # Ensure we'll be able to find this later on... if CustomPlotLimits: outDir = '%s_lims' % (outDir) # if we are testing the kind of correlation used, include that # in the output here. if checkCorrKind: if useSpearmanR[iRun]: sCorr = 'spearmanR' else: sCorr = 'pearsonR' outDir = '%s_%s' % (outDir, sCorr) # From this point onwards, stuff actually gets run. This is # the place to output what will actually happen next. print "runAstrom.go INFO - about to run:" print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside) print "runAstrom.go INFO - output directory will be %s" % (outDir) if not DoRun: continue # ensure the output directory actually exists... if not os.access(outDir, os.R_OK): print "runAstrom.go INFO - creating output directory %s" % (outDir) os.makedirs(outDir) resultsDb = db.ResultsDb(outDir=outDir) bundleList = [] # WIC - to make this at least somewhat uniform, build the plot # functions including arguments out of our copies above. plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion']) plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax']) plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage']) plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen']) # if using custom plot limits, will want to include the limits # for proper motion and parallax too... programming a bit defensively # here, including an extra check (rather than just the length of the lists # above). if CustomPlotLimits: if len(LUpperParallax) == len(runNames): plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun]) if len(LUpperPropmotion) == len(runNames): plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun]) # Configure the metrics metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs = plotFuncsParallax, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsPropmotion, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric = calibrationMetrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsCoverage, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) # Now for the HA Degen metric. If testing the type of # correlation, call the metric differently here. 
Since the # argument to actually do this is only part of my github fork # at the moment, we use a different call. Running with default # arguments (checkCorrKind=False) should then work without # difficulty. metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) if checkCorrKind: metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate, useSpearmanR=useSpearmanR[iRun]) print "TESTING CORRELATION KIND -- useSpearmanR", useSpearmanR[iRun] bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsHAdegen, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) # Run everything and make plots bundleDict = metricBundles.makeBundlesDictFromList(bundleList) bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb) # try: bgroup.runAll() print "runAstrom.go INFO - bundles took %.2f minutes" \ % ((time.time() - timeStartIteration) / 60.) # except KeyboardInterrupt: # print "runAstrom.go FATAL - keyboard interrupt detected. Halting." # return bgroup.plotAll() print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \ % ((time.time() - timeStartIteration) / 60.) print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
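# sqlFromFilterString() is called by go() above but is not defined in this
# excerpt. Below is a minimal guess at its behavior, based on the per-filter
# SQL constraints used elsewhere in this file ('filter = "u"',
# 'filter="g" or filter="r" or ...'); treat it as an assumption rather than
# the original implementation.
def sqlFromFilterString(filterString):
    """Build an SQL constraint selecting any of the single-letter filters in
    filterString; an empty string means no filter constraint."""
    if len(filterString) < 1:
        return ''
    clauses = ['filter = "%s"' % f for f in filterString]
    if len(clauses) == 1:
        return clauses[0]
    return '(' + ' or '.join(clauses) + ')'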
def go(nside=64, rmag=20., SedTemplate='flat', DoRun=False, LFilters = [], \ LNightMax=[], nightMax=1e4, \ CustomPlotLimits=True, \ RunOne=False, MaxRuns=1e3, \ SpatialClip=95.): # runNames = ['enigma_1189', 'ops2_1093'] # runNames #runNames = ['ops2_1092', 'kraken_1038', 'kraken_1034', 'ops2_1098'] #runNames = ['kraken_1038', 'kraken_1034', 'ops2_1098'] # 2015-12-23 - put kraken_1038 at the end, it seems to run # extremely slowly... runNames = ['enigma_1189', 'ops2_1098', 'kraken_1034', 'kraken_1038'] runNames = ['ops2_1092', 'kraken_1033', 'enigma_1271'] # UPDATE - ops2_1092 ran quite quickly on nside=32... rerun on 64 runNames = ['ops2_1092', 'enigma_1189', 'enigma_1271', 'kraken_1038'] # UPDATE 2015-12-28 -- run with single-filter choices, compare # enigma to ops2_1092 # WIC 2015-12-28 -- try with single-filter and all then small subset runNames = ['ops2_1092', 'ops2_1092', 'ops2_1092', 'enigma_1189', 'enigma_1189', 'enigma_1189'] LFilters = ["u", "y", '', "u", "y", ''] LNightMax = [1e4, 1e4, 730, 1e4, 1e4, 730] # WIC 2015-12-28 - 23:00 - try using a different SED template, # just go with single filters for now # # DO WE NEED THIS?? # WIC 2015-12-28 - 22:00; much to my surprise, that took less than # half an hour to go all the way through. Try again, this time using slightly more filters. runNames = ['ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189'] LFilters = ['', '', 'griz', 'griz'] # (griz was not recognized) # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits # Break the specifications across lines to make subdivision easier # Subsets by time first, then by filter, finally the whole shebang # (Yes the inversion of the first two is deliberate.) runNames = ['enigma_1189', 'ops2_1092', 'ops2_1092', 'enigma_1189', \ 'ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189', \ 'ops2_1092', 'enigma_1189'] LFilters = ['', '', '', '', \ 'u', 'u', 'y', 'y', \ '', ''] LNightMax = [365, 365, 730, 730, \ 1e4, 1e4, 1e4, 1e4, \ 1e4, 1e4] # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good LUpperParallax = [] LUpperPropmotion = [] if CustomPlotLimits: LUpperParallax = [10, 10, 10, 10, \ 10, 10, 40, 40, \ 3.0, 3.0 ] # For proper motion, it's a little tricky to say because the # regular case is so pathological for the field. Try the following: LUpperPropmotion = [40, 40, 5, 20, \ 3.5, 20, 3.5, 20, \ 0.5, 5] print "runAstrom.go INFO - will run the following:" for iSho in range(len(runNames)): print "%i: %-12s, %1s, %i" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho]) print "===========================" # print runNames # if not DoRun: # print "Set DoRun=True to actually run this." # print len(LFilters), len(runNames), len(LFilters) == len(runNames) # return #'kraken_1038', 'kraken_1034', 'ops2_1098'] # nside = 64 slicer = slicers.HealpixSlicer(nside=nside) # Make it so we don't bother with the silly power spectra plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # WIC - back up the plotting arguments with a default value plotFuncsPristine = copy.deepcopy(plotFuncs) # WIC - the only way this will make sense to me is if I make a # dictionary of plot arguments. Let's try it... 
DPlotArgs = {} for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']: DPlotArgs[plotArg] = copy.deepcopy(plotFuncs) if CustomPlotLimits: # All spatial maps use percentile clipping for plotMetric in DPlotArgs.keys(): DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip # Some limits common to spatial maps and histograms for iPl in range(0,2): DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True # Standardized range for the histograms for new parallax metrics DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0. DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1. DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1. DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] = 1. # Standardize at least the lower bound of the histogram in # both the proper motion and parallax errors. Upper limit we # can customize with a loop. DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2 # should not be zero if log scale!! DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0. # WIC - try changing the plot dictionary if not DoRun: plotFuncs[0].defaultPlotDict['logScale'] = True print DPlotArgs['propmotion'][0].defaultPlotDict print DPlotArgs['propmotion'][1].defaultPlotDict return # The old runs have the seeing in finSeeing seeingCol = 'finSeeing' # Try it out for a 20th mag star with a flat SED (can change mag # or to OBAFGKM) # rmag = 20. ## NOW AN ARGUMENT #SedTemplate='flat' # Use all the observations. Can change if you want a different # time span sqlconstraint = '' # list of sqlconstraints now used, which gets handled within the loop. # run some summary stats on everything summaryMetrics = [metrics.MedianMetric()] tStart = time.time() # Running one, or the whole lot? RunMax = len(runNames) # allow user to set a different number (say, 2) if MaxRuns < RunMax and MaxRuns > 0: RunMax = int(MaxRuns) # the following keyword overrides if RunOne: RunMax = 1 print "Starting runs. RunMax = %i" % (RunMax) for iRun in range(RunMax): run = runNames[iRun][:] # for run in runNames: # Open the OpSim database timeStartIteration = time.time() # Some syntax added to test for existence of the database dbFil = run+'_sqlite.db' if not os.access(dbFil, os.R_OK): print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil) print "runAstrom.go FATAL - skipping run %s" % (run) continue else: deltaT = time.time()-tStart print "runAstrom.go INFO - ##################################" print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \ % (run, nside, deltaT/60.) opsdb = db.OpsimDatabase(run+'_sqlite.db') # Set SQL constraint appropriate for each filter in the # list. If we supplied a list of filters, use it for sqlconstraint = '' ThisFilter = 'ugrizy' if len(LFilters) == len(runNames): # Only change the filter if one was actually supplied! if len(LFilters[iRun]) == 1: ThisFilter = LFilters[iRun] sqlconstraint = 'filter = "%s"' % (ThisFilter) # If nightmax was supplied, use it ThisNightMax = int(nightMax) # copy not view if len(LNightMax) == len(runNames): # Only update nightmax if one was given try: ThisNightMax = int(LNightMax[iRun]) # This might be redundant with the fmt statement below. if len(sqlconstraint) < 1: sqlconstraint = 'night < %i' % (ThisNightMax) else: sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax) except: print "runAstrom.go WARN - run %i problem with NightMax" % (iRun) dumdum = 1. # Set where the output should go - include the filter!! 
sMag = '%.1f' % (rmag) sMag = sMag.replace(".","p") outDir = '%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag) # Ensure we'll be able to find this later on... if CustomPlotLimits: outDir = '%s_lims' % (outDir) # From this point onwards, stuff actually gets run. This is # the place to output what will actually happen next. print "runAstrom.go INFO - about to run:" print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside) print "runAstrom.go INFO - output directory will be %s" % (outDir) if not DoRun: continue resultsDb = db.ResultsDb(outDir=outDir) bundleList = [] # WIC - to make this at least somewhat uniform, build the plot # functions including arguments out of our copies above. plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion']) plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax']) plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage']) plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen']) # if using custom plot limits, will want to include the limits # for proper motion and parallax too... programming a bit defensively # here, including an extra check (rather than just the length of the lists # above). if CustomPlotLimits: if len(LUpperParallax) == len(runNames): plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun]) if len(LUpperPropmotion) == len(runNames): plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun]) # Configure the metrics metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs = plotFuncsParallax, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsPropmotion, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric = metrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsCoverage, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric = metrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsHAdegen, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) # Run everything and make plots bundleDict = metricBundles.makeBundlesDictFromList(bundleList) bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb) # try: bgroup.runAll() print "runAstrom.go INFO - bundles took %.2f minutes" \ % ((time.time() - timeStartIteration) / 60.) # except KeyboardInterrupt: # print "runAstrom.go FATAL - keyboard interrupt detected. Halting." # return bgroup.plotAll() print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \ % ((time.time() - timeStartIteration) / 60.) print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
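# Example invocation of go() (a sketch, not part of the original script).
# With DoRun=False the routine only prints the configured run list and the
# proper-motion plot dictionaries, then returns; DoRun=True actually runs
# and plots the metric bundles. RunOne / MaxRuns limit how many of the
# configured opsim runs are processed.
if __name__ == "__main__":
    go(nside=32, rmag=20., DoRun=False)  # dry run: just report what would be done
    # go(nside=64, rmag=20., DoRun=True, RunOne=True)  # evaluate the first run only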
def astrometryBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, ditherStacker=None, ditherkwargs=None): """Metrics for evaluating proper motion and parallax. Parameters ---------- colmap : dict or None, opt A dictionary with a mapping of column names. Default will use OpsimV4 column names. runName : str, opt The name of the simulated survey. Default is "opsim". nside : int, opt Nside for the healpix slicer. Default 64. extraSql : str or None, opt Additional sql constraint to apply to all metrics. extraMetadata : str or None, opt Additional metadata to apply to all results. ditherStacker: str or lsst.sims.maf.stackers.BaseDitherStacker Optional dither stacker to use to define ra/dec columns. ditherkwargs: dict, opt Optional dictionary of kwargs for the dither stacker. Returns ------- metricBundleDict """ if colmap is None: colmap = ColMapDict('opsimV4') bundleList = [] sql = '' metadata = 'All visits' # Add additional sql constraint (such as wfdWhere) and metadata, if provided. if (extraSql is not None) and (len(extraSql) > 0): sql = extraSql if extraMetadata is None: metadata = extraSql.replace('filter =', '').replace('filter=', '') metadata = metadata.replace('"', '').replace("'", '') if extraMetadata is not None: metadata = extraMetadata subgroup = metadata raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols( ditherStacker, colmap, ditherkwargs) # Don't want dither info in subgroup (too long), but do want it in bundle name. metadata = combineMetadata(metadata, ditherMeta) rmags_para = [22.4, 24.0] rmags_pm = [20.5, 24.0] # Set up parallax/dcr stackers. parallaxStacker = stackers.ParallaxFactorStacker(raCol=raCol, decCol=decCol, dateCol=colmap['mjd'], degrees=degrees) dcrStacker = stackers.DcrStacker(filterCol=colmap['filter'], altCol=colmap['alt'], degrees=degrees, raCol=raCol, decCol=decCol, lstCol=colmap['lst'], site='LSST', mjdCol=colmap['mjd']) # Set up parallax metrics. slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = { 'group': 'Parallax', 'subgroup': subgroup, 'order': 0, 'caption': None } # Expected error on parallax at 10 AU. plotmaxVals = (2.0, 15.0) for rmag, plotmax in zip(rmags_para, plotmaxVals): plotDict = { 'xMin': 0, 'xMax': plotmax, 'colorMin': 0, 'colorMax': plotmax } metric = metrics.ParallaxMetric(metricName='Parallax Error @ %.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=False) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, plotDict=plotDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Parallax normalized to 'best possible' if all visits separated by 6 months. # This separates the effect of cadence from depth. for rmag in rmags_para: metric = metrics.ParallaxMetric( metricName='Normalized Parallax @ %.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=True) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Parallax factor coverage. 
for rmag in rmags_para: metric = metrics.ParallaxCoverageMetric( metricName='Parallax Coverage @ %.1f' % (rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom']) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Parallax problems can be caused by HA and DCR degeneracies. Check their correlation. for rmag in rmags_para: metric = metrics.ParallaxDcrDegenMetric( metricName='Parallax-DCR degeneracy @ %.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingEff'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth']) caption = 'Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % ( rmag) caption += ' (0 is good, near -1 or 1 is bad).' bundle = mb.MetricBundle( metric, slicer, sql, metadata=metadata, stackerList=[dcrStacker, parallaxStacker, ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Proper Motion metrics. displayDict = { 'group': 'Proper Motion', 'subgroup': subgroup, 'order': 0, 'caption': None } # Proper motion errors. plotmaxVals = (1.0, 5.0) for rmag, plotmax in zip(rmags_pm, plotmaxVals): plotDict = { 'xMin': 0, 'xMax': plotmax, 'colorMin': 0, 'colorMax': plotmax } metric = metrics.ProperMotionMetric( metricName='Proper Motion Error @ %.1f' % rmag, rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=False) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[ditherStacker], displayDict=displayDict, plotDict=plotDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Normalized proper motion. for rmag in rmags_pm: metric = metrics.ProperMotionMetric( metricName='Normalized Proper Motion @ %.1f' % rmag, rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=True) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[ditherStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Set the runName for all bundles and return the bundleDict. for b in bundleList: b.setRunName(runName) return mb.makeBundlesDictFromList(bundleList)
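# --------------------------------------------------------------------------
# A minimal usage sketch for astrometryBatch (an assumption, not part of the
# batch itself): build the bundle dictionary, then run it against an opsim
# database with a MetricBundleGroup, the same pattern used by the driver code
# earlier in this file. The database path and output directory are
# hypothetical placeholders.
import lsst.sims.maf.db as db
import lsst.sims.maf.metricBundles as metricBundles


def runAstrometryBatch_sketch(dbFile='opsim_sqlite.db', outDir='astrometry_out'):
    """Run the astrometry batch on one opsim database and make plots."""
    opsdb = db.OpsimDatabaseV4(dbFile)        # same connection class used above
    resultsDb = db.ResultsDb(outDir=outDir)
    bdict = astrometryBatch(runName='opsim', nside=64)
    bgroup = metricBundles.MetricBundleGroup(bdict, opsdb, outDir=outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    return bdict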
def glanceBatch(colmap=None, runName='opsim', nside=64, filternames=('u', 'g', 'r', 'i', 'z', 'y'), nyears=10, pairnside=32, sqlConstraint=None): """Generate a handy set of metrics that give a quick overview of how well a survey performed. This is a meta-set of other batches, to some extent. Parameters ---------- colmap : dict, opt A dictionary with a mapping of column names. Default will use OpsimV4 column names. runName : str, opt The name of the simulated survey. Default is "opsim". nside : int, opt The nside for the healpix slicers. Default 64. filternames : list of str, opt The list of individual filters to use when running metrics. Default is ('u', 'g', 'r', 'i', 'z', 'y'). There is always an all-visits version of the metrics run as well. nyears : int, opt How many years of hourglass plots to attempt to make. Default 10. pairnside : int, opt Nside to use for the pair fraction metric (it is slow, so a lower resolution helps). Default 32. sqlConstraint : str or None, opt Additional SQL constraint to apply to all metrics. Returns ------- metricBundleDict """ if isinstance(colmap, str): raise ValueError('colmap must be a dictionary, not a string') if colmap is None: colmap = ColMapDict('opsimV4') bundleList = [] if sqlConstraint is None: sqlC = '' else: sqlC = '(%s) and' % sqlConstraint sql_per_filt = [ '%s %s="%s"' % (sqlC, colmap['filter'], filtername) for filtername in filternames ] sql_per_and_all_filters = [sqlConstraint] + sql_per_filt standardStats = standardSummary() subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # Super basic things displayDict = {'group': 'Basic Stats', 'order': 1} sql = sqlConstraint slicer = slicers.UniSlicer() # Length of Survey metric = metrics.FullRangeMetric(col=colmap['mjd'], metricName='Length of Survey (days)') bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict) bundleList.append(bundle) # Total number of filter changes metric = metrics.NChangesMetric(col=colmap['filter'], orderBy=colmap['mjd']) bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict) bundleList.append(bundle) # Total open shutter fraction metric = metrics.OpenShutterFractionMetric( slewTimeCol=colmap['slewtime'], expTimeCol=colmap['exptime'], visitTimeCol=colmap['visittime']) bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict) bundleList.append(bundle) # Total effective exposure time metric = metrics.TeffMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'], normed=True) for sql in sql_per_and_all_filters: bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict) bundleList.append(bundle) # Number of observations, all and each filter metric = metrics.CountMetric(col=colmap['mjd'], metricName='Number of Exposures') for sql in sql_per_and_all_filters: bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict) bundleList.append(bundle) # The alt/az plots of all the pointings slicer = slicers.HealpixSlicer(nside=nside, latCol='zenithDistance', lonCol=colmap['az'], latLonDeg=colmap['raDecDeg'], useCache=False) stacker = stackers.ZenithDistStacker(altCol=colmap['alt'], degrees=colmap['raDecDeg']) metric = metrics.CountMetric(colmap['mjd'], metricName='Nvisits as function of Alt/Az') plotFuncs = [plots.LambertSkyMap()] for sql in sql_per_and_all_filters: bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=plotFuncs, displayDict=displayDict, stackerList=[stacker]) bundleList.append(bundle) # Things to check per night # Open Shutter per
night displayDict = {'group': 'Pointing Efficiency', 'order': 2} slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1) metric = metrics.OpenShutterFractionMetric( slewTimeCol=colmap['slewtime'], expTimeCol=colmap['exptime'], visitTimeCol=colmap['visittime']) sql = sqlConstraint bundle = metricBundles.MetricBundle(metric, slicer, sql, summaryMetrics=standardStats, displayDict=displayDict) bundleList.append(bundle) # Number of filter changes per night slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1) metric = metrics.NChangesMetric(col=colmap['filter'], orderBy=colmap['mjd'], metricName='Filter Changes') bundle = metricBundles.MetricBundle(metric, slicer, sql, summaryMetrics=standardStats, displayDict=displayDict) bundleList.append(bundle) # A few basic maps # Number of observations, coadded depths displayDict = {'group': 'Basic Maps', 'order': 3} slicer = slicers.HealpixSlicer(nside=nside, latCol=colmap['dec'], lonCol=colmap['ra'], latLonDeg=colmap['raDecDeg']) metric = metrics.CountMetric(col=colmap['mjd']) plotDict = {'percentileClip': 95.} for sql in sql_per_and_all_filters: bundle = metricBundles.MetricBundle(metric, slicer, sql, summaryMetrics=standardStats, displayDict=displayDict, plotDict=plotDict) bundleList.append(bundle) metric = metrics.Coaddm5Metric(m5Col=colmap['fiveSigmaDepth']) for sql in sql_per_and_all_filters: bundle = metricBundles.MetricBundle(metric, slicer, sql, summaryMetrics=standardStats, displayDict=displayDict) bundleList.append(bundle) # Checking a few basic science things # Maybe check astrometry, observation pairs, SN plotDict = {'percentileClip': 95.} displayDict = {'group': 'Science', 'subgroup': 'Astrometry', 'order': 4} stackerList = [] stacker = stackers.ParallaxFactorStacker(raCol=colmap['ra'], decCol=colmap['dec'], degrees=colmap['raDecDeg'], dateCol=colmap['mjd']) stackerList.append(stacker) # Maybe parallax and proper motion, fraction of visits in a good pair for SS displayDict['caption'] = r'Parallax precision of an $r=20$ flat SED star' metric = metrics.ParallaxMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom']) sql = sqlConstraint bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=subsetPlots, displayDict=displayDict, stackerList=stackerList, plotDict=plotDict) bundleList.append(bundle) displayDict['caption'] = r'Proper motion precision of an $r=20$ flat SED star' metric = metrics.ProperMotionMetric(m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom']) bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=subsetPlots, displayDict=displayDict, plotDict=plotDict) bundleList.append(bundle) # Solar system stuff displayDict['caption'] = 'Fraction of observations that are in pairs' displayDict['subgroup'] = 'Solar System' sql = '%s (filter="g" or filter="r" or filter="i")' % sqlC pairSlicer = slicers.HealpixSlicer(nside=pairnside, latCol=colmap['dec'], lonCol=colmap['ra'], latLonDeg=colmap['raDecDeg']) metric = metrics.PairFractionMetric(mjdCol=colmap['mjd']) bundle = metricBundles.MetricBundle(metric, pairSlicer, sql, plotFuncs=subsetPlots, displayDict=displayDict) bundleList.append(bundle) # Stats from the note column if 'note' in colmap.keys(): displayDict = {'group': 'Basic Stats', 'subgroup': 'Percent stats'} metric = metrics.StringCountMetric(col=colmap['note'], percent=True, metricName='Percents') sql = '' slicer = slicers.UniSlicer() bundle = metricBundles.MetricBundle(metric,
slicer, sql, displayDict=displayDict) bundleList.append(bundle) displayDict['subgroup'] = 'Count Stats' metric = metrics.StringCountMetric(col=colmap['note'], metricName='Counts') bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict) bundleList.append(bundle) for b in bundleList: b.setRunName(runName) # Add hourglass plots. hrDict = hourglassBatch(colmap=colmap, runName=runName, nyears=nyears, extraSql=sqlConstraint) # Add basic slew stats. Keep an empty dict so the update below is safe if the colmap is missing keys. slewDict = {} try: slewDict = slewBasics(colmap=colmap, runName=runName) except KeyError as e: warnings.warn( 'Could not add slew stats: missing required key %s from colmap' % (e)) bd = metricBundles.makeBundlesDictFromList(bundleList) bd.update(slewDict) bd.update(hrDict) return bd
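# --------------------------------------------------------------------------
# A small illustrative sketch (hypothetical helper, pure Python) of how
# glanceBatch composes its per-filter SQL constraints: an optional global
# constraint is prefixed as "(...) and", one clause per filter is built from
# it, and the unfiltered constraint itself leads the list.
def perFilterSql_sketch(sqlConstraint=None, filterCol='filter',
                        filternames=('u', 'g', 'r', 'i', 'z', 'y')):
    """Mirror the sql_per_and_all_filters construction used in glanceBatch."""
    sqlC = '' if sqlConstraint is None else '(%s) and' % sqlConstraint
    sql_per_filt = ['%s %s="%s"' % (sqlC, filterCol, f) for f in filternames]
    return [sqlConstraint] + sql_per_filt


# Example: perFilterSql_sketch('night < 365', filternames=('u', 'g')) returns
# ['night < 365', '(night < 365) and filter="u"', '(night < 365) and filter="g"']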
def scienceRadarBatch(colmap=None, runName='', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, DDF=True): """A batch of metrics for looking at survey performance relative to the SRD and the main science drivers of LSST. Parameters ---------- colmap : dict or None, opt A dictionary with a mapping of column names. Default will use OpsimV4 column names. runName : str, opt The name of the simulated survey. extraSql : str or None, opt Additional sql constraint to apply to all metrics. extraMetadata : str or None, opt Additional metadata to apply to all results. nside : int, opt Nside for the healpix slicer. Default 64. benchmarkArea : float, opt Sky area (sq deg) used as the benchmark for the fO summary metrics. Default 18000. benchmarkNvisits : float, opt Number of visits used as the benchmark for the fO summary metrics. Default 825. DDF : bool, opt If True, include the deep drilling field metrics. Default True. Returns ------- metricBundleDict """ # Hide dependencies from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc if colmap is None: colmap = ColMapDict('opsimV4') if extraSql is None: extraSql = '' if extraSql == '': joiner = '' else: joiner = ' and ' bundleList = [] healslicer = slicers.HealpixSlicer(nside=nside) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # Load up the PLASTICC light curves models = ['SNIa-normal', 'KN'] plasticc_models_dict = {} for model in models: plasticc_models_dict[model] = list( load_plasticc_lc(model=model).values()) ######################### # SRD, DM, etc ######################### sql = extraSql displayDict = { 'group': 'SRD', 'subgroup': 'fO', 'order': 0, 'caption': None } metric = metrics.CountMetric(col=colmap['mjd'], metricName='fO') plotDict = { 'xlabel': 'Number of Visits', 'Asky': benchmarkArea, 'Nvisit': benchmarkNvisits, 'xMin': 0, 'xMax': 1500 } summaryMetrics = [ metrics.fOArea(nside=nside, norm=False, metricName='fOArea', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fOArea(nside=nside, norm=True, metricName='fOArea/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fONv(nside=nside, norm=False, metricName='fONv', Asky=benchmarkArea, Nvisit=benchmarkNvisits), metrics.fONv(nside=nside, norm=True, metricName='fONv/benchmark', Asky=benchmarkArea, Nvisit=benchmarkNvisits) ] caption = 'The fO metric evaluates the overall efficiency of observing. ' caption += ( 'fONv: out of %.2f sq degrees, the area receives at least X and a median of Y visits ' '(out of %d, if compared to benchmark). ' % (benchmarkArea, benchmarkNvisits)) caption += ('fOArea: this many sq deg (out of %.2f sq deg if compared ' 'to benchmark) receives at least %d visits. ' % (benchmarkArea, benchmarkNvisits)) displayDict['caption'] = caption bundle = mb.MetricBundle(metric, healslicer, sql, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryMetrics, plotFuncs=[plots.FOPlot()]) bundleList.append(bundle) displayDict['order'] += 1 displayDict = { 'group': 'SRD', 'subgroup': 'Gaps', 'order': 0, 'caption': None } plotDict = {'percentileClip': 95.} for filtername in 'ugrizy': sql = extraSql + joiner + 'filter ="%s"' % filtername metric = metrics.MaxGapMetric() summaryMetrics = [ metrics.PercentileMetric( percentile=95, metricName='95th percentile of Max gap, %s' % filtername) ] bundle = mb.MetricBundle(metric, healslicer, sql, plotFuncs=subsetPlots, summaryMetrics=summaryMetrics, displayDict=displayDict, plotDict=plotDict) bundleList.append(bundle) displayDict['order'] += 1 ######################### # Solar System ######################### # XXX -- may want to do Solar system separately # XXX -- fraction of NEOs detected (assume some nominal size and albedo) # XXX -- fraction of MBAs detected # XXX -- fraction of KBOs detected # XXX -- any others? Planet 9s? Comets? Neptune Trojans?
######################### # Cosmology ######################### displayDict = { 'group': 'Cosmology', 'subgroup': 'galaxy counts', 'order': 0, 'caption': None } plotDict = {'percentileClip': 95.} sql = extraSql + joiner + 'filter="i"' metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside) summary = [ metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True, metricName='N Galaxies (WFD)') ] summary.append(metrics.SumMetric(metricName='N Galaxies (all)')) # make sure the slicer has its cache off slicer = slicers.HealpixSlicer(nside=nside, useCache=False) bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Let's put Type Ia SNe in here displayDict['subgroup'] = 'SNe Ia' metadata = '' # XXX -- use the light curves from PLASTICC here displayDict['caption'] = 'Fraction of normal SNe Ia' sql = '' slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0) metric = Plasticc_metric(metricName='SNIa') # Set the maskval so that we count missing objects as zero. summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=metadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 # XXX -- need some sort of metric for weak lensing and camera rotation. ######################### # Variables and Transients ######################### displayDict = { 'group': 'Variables and Transients', 'subgroup': 'Periodic Stars', 'order': 0, 'caption': None } periods = [0.1, 0.5, 1., 2., 5., 10., 20.] # days plotDict = {} metadata = '' sql = extraSql displayDict['caption'] = ('Measure of how well a periodic signal can be measured, combining amplitude ' 'and phase coverage. 1 is perfect, 0 means the signal cannot be fit.') for period in periods: summary = metrics.PercentileMetric( percentile=10., metricName='10th %%-ile Periodic Quality, Period=%.1f days' % period) metric = metrics.PeriodicQualityMetric( period=period, starMag=20., metricName='Periodic Stars, P=%.1f d' % period) bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, plotFuncs=subsetPlots, summaryMetrics=summary) bundleList.append(bundle) displayDict['order'] += 1 # XXX -- add some PLASTICC metrics for kilonovae and tidal disruption events. displayDict['subgroup'] = 'KN' displayDict['caption'] = 'Fraction of kilonovae (from PLASTICC)' sql = '' slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0) metric = Plasticc_metric(metricName='KN') summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=metadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 # XXX -- it would be good to add some microlensing events, for both the MW and LMC/SMC. ######################### # Milky Way ######################### # Proper motion, parallax, and DCR degeneracy of an r=20 mag star rmag = 20.
displayDict = { 'group': 'Milky Way', 'subgroup': 'Astrometry', 'order': 0, 'caption': None } sql = extraSql metadata = '' plotDict = {'percentileClip': 95.} metric = metrics.ParallaxMetric(metricName='Parallax Error r=%.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=False) summary = [ metrics.AreaSummaryMetric(area=18000, reduce_func=np.median, decreasing=False, metricName='Median Parallax Error (WFD)') ] summary.append( metrics.PercentileMetric(percentile=95, metricName='95th Percentile Parallax Error')) bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, plotFuncs=subsetPlots, summaryMetrics=summary) bundleList.append(bundle) displayDict['order'] += 1 metric = metrics.ProperMotionMetric( metricName='Proper Motion Error r=%.1f' % rmag, rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=False) summary = [ metrics.AreaSummaryMetric( area=18000, reduce_func=np.median, decreasing=False, metricName='Median Proper Motion Error (WFD)') ] summary.append( metrics.PercentileMetric( percentile=95, metricName='95th Percentile Proper Motion Error')) bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, plotDict=plotDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 metric = metrics.ParallaxDcrDegenMetric( metricName='Parallax-DCR degeneracy r=%.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingEff'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth']) caption = 'Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % ( rmag) caption += ' (0 is good, near -1 or 1 is bad).' displayDict['caption'] = caption # XXX -- not sure what kind of summary to do here summary = [metrics.MeanMetric(metricName='Mean DCR Degeneracy')] bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata, displayDict=displayDict, summaryMetrics=summary, plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 for b in bundleList: b.setRunName(runName) ######################### # DDF ######################### ddf_time_bundleDicts = [] if DDF: # Hide this import to avoid adding a dependency.
from lsst.sims.featureScheduler.surveys import generate_dd_surveys ddf_surveys = generate_dd_surveys() # For doing a high-res sampling of the DDF for co-adds ddf_radius = 1.8 # Degrees ddf_nside = 512 ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside))) displayDict = { 'group': 'DDF depths', 'subgroup': None, 'order': 0, 'caption': None } # Run the inter and intra gaps at the center of the DDFs for survey in ddf_surveys: slicer = slicers.UserPointsSlicer(ra=np.degrees(survey.ra), dec=np.degrees(survey.dec), useCamera=False) ddf_time_bundleDicts.append( interNight(colmap=colmap, slicer=slicer, runName=runName, nside=64, extraSql='note="%s"' % survey.survey_name, subgroup=survey.survey_name)[0]) ddf_time_bundleDicts.append( intraNight(colmap=colmap, slicer=slicer, runName=runName, nside=64, extraSql='note="%s"' % survey.survey_name, subgroup=survey.survey_name)[0]) for survey in ddf_surveys: displayDict['subgroup'] = survey.survey_name # Crop off the u-band only DDF if survey.survey_name[0:4] != 'DD:u': dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra), np.degrees(survey.dec)) goodhp = np.where(dist_to_ddf <= ddf_radius) slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp], useCamera=False) for filtername in ['u', 'g', 'r', 'i', 'z', 'y']: metric = metrics.Coaddm5Metric( metricName=survey.survey_name + ', ' + filtername) summary = [ metrics.MedianMetric(metricName='median depth ' + survey.survey_name + ', ' + filtername) ] sql = extraSql + joiner + 'filter = "%s"' % filtername bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, displayDict=displayDict, summaryMetrics=summary, plotFuncs=[]) bundleList.append(bundle) displayDict['order'] += 1 displayDict = { 'group': 'DDF Transients', 'subgroup': None, 'order': 0, 'caption': None } for survey in ddf_surveys: displayDict['subgroup'] = survey.survey_name if survey.survey_name[0:4] != 'DD:u': slicer = plasticc_slicer( plcs=plasticc_models_dict['SNIa-normal'], seed=42, ra_cen=survey.ra, dec_cen=survey.dec, radius=np.radians(3.), useCamera=False) metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa') sql = '' summary_stats = [metrics.MeanMetric(maskVal=0)] plotFuncs = [plots.HealpixSkyMap()] bundle = mb.MetricBundle(metric, slicer, sql, runName=runName, summaryMetrics=summary_stats, plotFuncs=plotFuncs, metadata=metadata, displayDict=displayDict) bundleList.append(bundle) displayDict['order'] += 1 for b in bundleList: b.setRunName(runName) bundleDict = mb.makeBundlesDictFromList(bundleList) intraDict = intraNight(colmap=colmap, runName=runName, nside=nside, extraSql=extraSql, extraMetadata=extraMetadata)[0] interDict = interNight(colmap=colmap, runName=runName, nside=nside, extraSql=extraSql, extraMetadata=extraMetadata)[0] bundleDict.update(intraDict) bundleDict.update(interDict) for ddf_time in ddf_time_bundleDicts: bundleDict.update(ddf_time) return bundleDict
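# --------------------------------------------------------------------------
# A hedged sketch of the high-resolution DDF pixel selection used above:
# rather than computing the angular separation of every nside=512 pixel from
# the field centre, healpy's query_disc can return the pixels within
# ddf_radius directly. This is an alternative idiom under the assumption that
# healpy is available; ra_cen/dec_cen (degrees) are placeholder inputs, not
# values from the batch itself.
import numpy as np
import healpy as hp


def ddfPixels_sketch(ra_cen, dec_cen, ddf_radius=1.8, ddf_nside=512):
    """Return the RA/Dec (degrees) of healpixels within ddf_radius of a DDF centre."""
    centreVec = hp.ang2vec(ra_cen, dec_cen, lonlat=True)
    goodhp = hp.query_disc(ddf_nside, centreVec, np.radians(ddf_radius), inclusive=True)
    ra, dec = hp.pix2ang(ddf_nside, goodhp, lonlat=True)
    return ra, dec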
def astrometryBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64): # Allow user to add dithering. if colmap is None: colmap = ColMapDict('opsimV4') bundleList = [] sql = '' metadata = 'All visits' # Add additional sql constraint (such as wfdWhere) and metadata, if provided. if (extraSql is not None) and (len(extraSql) > 0): sql = extraSql if extraMetadata is None: metadata = extraSql.replace('filter =', '').replace('filter=', '') metadata = metadata.replace('"', '').replace("'", '') if extraMetadata is not None: metadata = extraMetadata subgroup = metadata raCol = colmap['ra'] decCol = colmap['dec'] degrees = colmap['raDecDeg'] # Set up stackers. parallaxStacker = stackers.ParallaxFactorStacker(raCol=raCol, decCol=decCol, dateCol=colmap['mjd'], degrees=degrees) dcrStacker = stackers.DcrStacker(filterCol=colmap['filter'], altCol=colmap['alt'], degrees=degrees, raCol=raCol, decCol=decCol, lstCol=colmap['lst'], site='LSST', mjdCol=colmap['mjd']) # Set up parallax metrics. slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': 'Parallax', 'subgroup': subgroup, 'order': 0, 'caption': None} # Expected error on parallax at 10 AU. for rmag in (20.0, 24.0): metric = metrics.ParallaxMetric(metricName='Parallax @ %.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=False) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Parallax normalized to 'best possible' if all visits separated by 6 months. # This separates the effect of cadence from depth. for rmag in (20.0, 24.0): metric = metrics.ParallaxMetric(metricName='Normalized Parallax @ %.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingGeom'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth'], normalize=True) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Parallax factor coverage. for rmag in (20.0, 24.0): metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage @ %.1f' % (rmag), rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom']) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[parallaxStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Parallax problems can be caused by HA and DCR degeneracies. Check their correlation. for rmag in (20.0, 24.0): metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy @ %.1f' % (rmag), rmag=rmag, seeingCol=colmap['seeingEff'], filterCol=colmap['filter'], m5Col=colmap['fiveSigmaDepth']) caption = 'Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % (rmag) caption += ' (0 is good, near -1 or 1 is bad).' bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, stackerList=[dcrStacker, parallaxStacker], displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Proper Motion metrics. 
displayDict = {'group': 'Proper Motion', 'subgroup': subgroup, 'order': 0, 'caption': None} # Proper motion errors. for rmag in (20.0, 24.0): metric = metrics.ProperMotionMetric(metricName='Proper Motion %.1f' % rmag, rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=False) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Normalized proper motion. for rmag in (20.0, 24.0): metric = metrics.ProperMotionMetric(metricName='Normalized Proper Motion %.1f' % rmag, rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'], filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'], normalize=True) bundle = mb.MetricBundle(metric, slicer, sql, metadata=metadata, displayDict=displayDict, summaryMetrics=standardSummary(), plotFuncs=subsetPlots) bundleList.append(bundle) displayDict['order'] += 1 # Set the runName for all bundles and return the bundleDict. for b in bundleList: b.setRunName(runName) return mb.makeBundlesDictFromList(bundleList)
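# --------------------------------------------------------------------------
# A closing usage sketch (hypothetical, assuming an opsim sqlite file exists
# at the placeholder path): several batch dictionaries can be merged and run
# through a single MetricBundleGroup, the same dict-update pattern glanceBatch
# uses internally when it folds in the slew and hourglass dictionaries.
import lsst.sims.maf.db as db
import lsst.sims.maf.metricBundles as metricBundles


def runCombinedBatches_sketch(dbFile='opsim_sqlite.db', outDir='maf_out'):
    """Merge the glance and astrometry batch dicts and run them in one group."""
    opsdb = db.OpsimDatabaseV4(dbFile)
    resultsDb = db.ResultsDb(outDir=outDir)
    bdict = {}
    bdict.update(glanceBatch(runName='opsim'))
    bdict.update(astrometryBatch(runName='opsim'))
    bgroup = metricBundles.MetricBundleGroup(bdict, opsdb, outDir=outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    return bdict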