def meanRADec(colmap=None, runName='opsim', extraSql=None, extraMetadata=None):
    """Plot the range of RA/Dec as a function of night.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'night<365')
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD"). Default is None.

    Returns
    -------
    (metricBundleDict, plotBundles) : tuple
        The bundle dictionary plus the (currently always empty) list of plot bundles.
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    # NOTE(review): plotBundles is never populated here; it is returned only to
    # keep the (dict, list) return shape used by similar batch functions.
    plotBundles = []

    group = 'RA Dec coverage'
    subgroup = 'All visits'
    if extraMetadata is not None:
        subgroup = extraMetadata

    displayDict = {'group': group, 'subgroup': subgroup, 'order': 0}

    # RA needs angle-aware metrics (wrap-around at 360); Dec does not.
    ra_metrics = [metrics.MeanAngleMetric(colmap['ra']),
                  metrics.FullRangeAngleMetric(colmap['ra'])]

    dec_metrics = [metrics.MeanMetric(colmap['dec']),
                   metrics.MinMetric(colmap['dec']),
                   metrics.MaxMetric(colmap['dec'])]

    for m in ra_metrics:
        slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
        # Match plot limits to the angle units used in the database (radians vs degrees).
        if not colmap['raDecDeg']:
            plotDict = {'yMin': np.radians(-5), 'yMax': np.radians(365)}
        else:
            plotDict = {'yMin': -5, 'yMax': 365}
        bundle = mb.MetricBundle(m, slicer, extraSql, metadata=extraMetadata,
                                 displayDict=displayDict, plotDict=plotDict)
        bundleList.append(bundle)

    for m in dec_metrics:
        slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
        bundle = mb.MetricBundle(m, slicer, extraSql, metadata=extraMetadata,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList), plotBundles
def nvisitsPerNight(colmap=None, runName='opsim', binNights=1, sqlConstraint=None,
                    metadata=None):
    """Count the number of visits per night through the survey.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    binNights : int, opt
        Number of nights to count in each bin.
        Default = 1, count number of visits in each night.
    sqlConstraint : str or None, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    metadata : str or None, opt
        Additional metadata to add before any below (i.e. "WFD"). Default is None.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')

    subgroup = metadata
    if subgroup is None:
        subgroup = 'All visits'

    # Caption falls back to the sql constraint, then to 'all visits'.
    metadataCaption = metadata
    if metadata is None:
        if sqlConstraint is not None:
            metadataCaption = sqlConstraint
        else:
            metadataCaption = 'all visits'

    bundleList = []

    displayDict = {'group': 'Per Night', 'subgroup': subgroup}
    displayDict['caption'] = 'Number of visits per night for %s.' % (metadataCaption)
    displayDict['order'] = 0
    metric = metrics.CountMetric(colmap['mjd'], metricName='Nvisits')
    # FIX: slice on the night column rather than mjd, so bins align with survey
    # nights as the docstring (and the binNights parameter name) promise.
    # This also matches every other per-night batch in this file, which slice
    # on colmap['night'].
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'],
                                binsize=int(binNights))
    bundle = mb.MetricBundle(metric, slicer, sqlConstraint, metadata=metadata,
                             displayDict=displayDict,
                             summaryMetrics=standardSummary())
    bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def filtersPerNight(colmap=None, runName='opsim', nights=1, extraSql=None,
                    extraMetadata=None):
    """Generate a set of metrics measuring the number and rate of filter changes
    over a given span of nights.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    nights : int, opt
        Size of night bin to use when calculating metrics. Default is 1.
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD"). Default is None.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    # Set up sql and metadata, if passed any additional information.
    sql = ''
    metadata = 'Per'
    if nights == 1:
        metadata += ' Night'
    else:
        metadata += ' %s Nights' % nights
    metacaption = metadata.lower()
    if (extraSql is not None) and (len(extraSql) > 0):
        sql = extraSql
        # Only fold the raw sql into metadata if no explicit metadata was given.
        if extraMetadata is None:
            metadata += ' %s' % extraSql
            metacaption += ', with %s selection' % extraSql
    if extraMetadata is not None:
        metadata += ' %s' % extraMetadata
        metacaption += ', %s only' % extraMetadata
    metacaption += '.'

    displayDict = {'group': 'Filter Changes', 'subgroup': metadata}
    summaryStats = standardSummary()

    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=nights)
    # setupMetrics returns parallel lists of metrics and caption prefixes.
    metricList, captionList = setupMetrics(colmap)
    for m, caption in zip(metricList, captionList):
        displayDict['caption'] = caption + metacaption
        bundle = mb.MetricBundle(m, slicer, sql, runName=runName,
                                 metadata=metadata, displayDict=displayDict,
                                 summaryMetrics=summaryStats)
        bundleList.append(bundle)

    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def test_oneDSlicer(self):
    """Round-trip a OneDSlicer through writeData/readData and verify equality."""
    original = slicers.OneDSlicer(sliceColName='testdata')
    simData = np.zeros(10000, dtype=[('testdata', 'float')])
    simData['testdata'] = np.random.rand(10000)
    original.setupSlicer(simData)
    with lsst.utils.tests.getTempFilePath('.npz') as tempfile:
        original.writeData(tempfile, simData[:100])
        dataBack, restored, header = self.baseslicer.readData(tempfile)
        assert (original == restored)
        # Spot-check that key attributes survived serialization.
        for att in ('nslice', 'columnsNeeded'):
            expected = getattr(original, att)
            recovered = getattr(restored, att)
            if type(expected).__module__ == 'numpy':
                np.testing.assert_almost_equal(expected, recovered)
            else:
                assert (expected == recovered)
def eastWestBias(colmap=None, runName='opsim', extraSql=None, extraMetadata=None):
    """Plot the number of observations to the east vs to the west, per night.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'night<365')
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD"). Default is None.
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    plotBundles = []

    subgroup = 'All visits'
    if extraMetadata is not None:
        subgroup = extraMetadata
    displayDict = {'group': 'East vs West', 'subgroup': subgroup, 'order': 0}

    # Azimuth dividing line, converted to radians if the database uses radians.
    eastvswest = 180
    if not colmap['raDecDeg']:
        eastvswest = np.radians(eastvswest)

    # (metric name, caption, sql comparison operator, plot color, legend label)
    halves = [('Nvisits East',
               'Number of visits per night that occur with azimuth <= 180.',
               '<=', 'orange', 'East'),
              ('Nvisits West',
               'Number of visits per night that occur with azimuth > 180.',
               '>', 'blue', 'West')]

    for metricName, caption, op, color, label in halves:
        displayDict['caption'] = caption
        if extraSql is not None:
            displayDict['caption'] += ' With additional sql constraint %s.' % extraSql
        metric = metrics.CountMetric(colmap['night'], metricName=metricName)
        slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
        sql = '%s %s %f' % (colmap['az'], op, eastvswest)
        if extraSql is not None:
            sql = '(%s) and (%s)' % (sql, extraSql)
        plotDict = {'color': color, 'label': label}
        bundle = mb.MetricBundle(metric, slicer, sql, metadata=extraMetadata,
                                 displayDict=displayDict, plotDict=plotDict)
        bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList), plotBundles
def setUp(self):
    """Build a OneDSlicer over seeded random 'testdata' values for each test."""
    simData = makeDataValues(1000, random=40082)
    slicer = slicers.OneDSlicer(sliceColName='testdata')
    slicer.setupSlicer(simData)
    self.testslicer = slicer
def metadataBasicsAngle(value, colmap=None, runName='opsim', valueName=None,
                        groupName=None, extraSql=None, extraMetadata=None,
                        nside=64, ditherStacker=None, ditherkwargs=None):
    """Calculate basic metrics on visit metadata 'value', where value is a wrap-around angle.

    Calculates extended standard metrics (with unislicer) on the quantity (all visits and per filter),
    makes histogram of the value (all visits and per filter),

    Parameters
    ----------
    value : str
        The column name for the quantity to evaluate.
        (column name in the database or created by a stacker).
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    valueName : str, opt
        The name of the value to be reported in the resultsDb and added to the metric.
        This is intended to help standardize metric comparison between sim versions.
        value = name as it is in the database (seeingFwhmGeom, etc).
        valueName = name to be recorded ('seeingGeom', etc.).  Default is None, which will match 'value'.
    groupName : str, opt
        The group name for this quantity in the displayDict.
        Default is the same as 'valueName', capitalized.
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").  Default is None.
    nside : int, opt
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.
    ditherStacker: str or lsst.sims.maf.stackers.BaseDitherStacker
        Optional dither stacker to use to define ra/dec columns.
    ditherkwargs: dict, opt
        Optional dictionary of kwargs for the dither stacker.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    if valueName is None:
        valueName = value

    if groupName is None:
        groupName = valueName.capitalize()
        subgroup = extraMetadata
    else:
        groupName = groupName.capitalize()
        subgroup = valueName.capitalize()

    if subgroup is None:
        subgroup = 'All visits'

    displayDict = {'group': groupName, 'subgroup': subgroup}

    # Resolve ra/dec column names (and stacker) for the requested dithering.
    raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols(
        ditherStacker, colmap, ditherkwargs)
    extraMetadata = combineMetadata(extraMetadata, ditherMeta)

    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(
        all=True, extraSql=extraSql, extraMetadata=extraMetadata)

    stackerList = [ditherStacker]

    # Summarize values over all and per filter.
    slicer = slicers.UniSlicer()
    for f in filterlist:
        for m in standardAngleMetrics(value, replace_colname=valueName):
            displayDict['caption'] = '%s for %s.' % (m.name, metadata[f])
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m, slicer, sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     displayDict=displayDict)
            bundleList.append(bundle)

    # Histogram values over all and per filter.
    for f in filterlist:
        displayDict['caption'] = 'Histogram of %s' % (value)
        if valueName != value:
            displayDict['caption'] += ' (%s)' % (valueName)
        displayDict['caption'] += ' for %s.' % (metadata[f])
        displayDict['order'] = orders[f]
        m = metrics.CountMetric(value, metricName='%s Histogram' % (valueName))
        slicer = slicers.OneDSlicer(sliceColName=value)
        bundle = mb.MetricBundle(m, slicer, sqls[f],
                                 stackerList=stackerList,
                                 metadata=metadata[f],
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Make maps of min/median/max for all and per filter, per RA/Dec, with standard summary stats.
    # Angle-aware versions of mean/range/rms are used since 'value' wraps around.
    mList = []
    mList.append(
        metrics.MeanAngleMetric(value, metricName='AngleMean %s' % (valueName)))
    mList.append(
        metrics.FullRangeAngleMetric(value, metricName='AngleRange %s' % (valueName)))
    mList.append(
        metrics.RmsAngleMetric(value, metricName='AngleRms %s' % (valueName)))
    # NOTE(review): docstring says nside=None skips the healpix metrics, but no
    # such guard is visible here — confirm whether that check lives elsewhere.
    slicer = slicers.HealpixSlicer(nside=nside, latCol=decCol, lonCol=raCol,
                                   latLonDeg=degrees)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    for f in filterlist:
        for m in mList:
            displayDict['caption'] = 'Map of %s' % m.name
            if valueName != value:
                displayDict['caption'] += ' (%s)' % value
            displayDict['caption'] += ' for %s.' % metadata[f]
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m, slicer, sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     plotFuncs=subsetPlots,
                                     displayDict=displayDict,
                                     summaryMetrics=standardSummary())
            bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def metadataBasics(value, colmap=None, runName='opsim', valueName=None,
                   groupName=None, extraSql=None, extraMetadata=None,
                   nside=64):
    """Calculate basic metrics on visit metadata 'value' (e.g. airmass, normalized airmass, seeing..).

    Calculates this around the sky (HealpixSlicer), makes histograms of all visits (OneDSlicer),
    and calculates statistics on all visits (UniSlicer) for the quantity in all visits and per filter.

    TODO: handle stackers which need configuration (degrees, in particular) more automatically.
    Currently have a hack for HA & normairmass.

    Parameters
    ----------
    value : str
        The column name for the quantity to evaluate.
        (column name in the database or created by a stacker).
    colmap : dict or None, opt
        A dictionary with a mapping of column names.
        Default will use the 'fbs' column mapping (note: unlike most batches in
        this file, which default to OpsimV4 names).
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    valueName : str, opt
        The name of the value to be reported in the resultsDb and added to the metric.
        This is intended to help standardize metric comparison between sim versions.
        value = name as it is in the database (seeingFwhmGeom, etc).
        valueName = name to be recorded ('seeingGeom', etc.).  Default is None, which will match 'value'.
    groupName : str, opt
        The group name for this quantity in the displayDict.
        Default is the same as 'valueName', capitalized.
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").  Default is None.
    nside : int, opt
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('fbs')
    bundleList = []

    if valueName is None:
        valueName = value

    if groupName is None:
        groupName = valueName.capitalize()
        subgroup = extraMetadata
    else:
        groupName = groupName.capitalize()
        subgroup = valueName.capitalize()

    if subgroup is None:
        subgroup = 'All visits'

    displayDict = {'group': groupName, 'subgroup': subgroup}

    # No dithering here: pass None for the stacker and kwargs.
    raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols(
        None, colmap, None)
    extraMetadata = combineMetadata(extraMetadata, ditherMeta)

    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(
        all=True, extraSql=extraSql, extraMetadata=extraMetadata)

    # Hack to make HA work, but really I need to account for any stackers/colmaps.
    if value == 'HA':
        stackerList = [
            stackers.HourAngleStacker(lstCol=colmap['lst'], raCol=raCol,
                                      degrees=degrees)
        ]
    elif value == 'normairmass':
        stackerList = [stackers.NormAirmassStacker(degrees=degrees)]
    else:
        stackerList = None

    # Summarize values over all and per filter (min/mean/median/max/percentiles/outliers/rms).
    slicer = slicers.UniSlicer()
    for f in filterlist:
        for m in extendedMetrics(value, replace_colname=valueName):
            displayDict['caption'] = '%s for %s.' % (m.name, metadata[f])
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m, slicer, sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     displayDict=displayDict)
            bundleList.append(bundle)

    # Histogram values over all and per filter.
    for f in filterlist:
        displayDict['caption'] = 'Histogram of %s' % (value)
        if valueName != value:
            displayDict['caption'] += ' (%s)' % (valueName)
        displayDict['caption'] += ' for %s.' % (metadata[f])
        displayDict['order'] = orders[f]
        m = metrics.CountMetric(value, metricName='%s Histogram' % (valueName))
        slicer = slicers.OneDSlicer(sliceColName=value)
        bundle = mb.MetricBundle(m, slicer, sqls[f],
                                 stackerList=stackerList,
                                 metadata=metadata[f],
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Make maps of min/median/max for all and per filter, per RA/Dec, with standard summary stats.
    # NOTE(review): docstring says nside=None skips the healpix metrics, but no
    # such guard is visible here — confirm whether that check lives elsewhere.
    mList = []
    mList.append(metrics.MinMetric(value, metricName='Min %s' % (valueName)))
    mList.append(
        metrics.MedianMetric(value, metricName='Median %s' % (valueName)))
    mList.append(metrics.MaxMetric(value, metricName='Max %s' % (valueName)))
    slicer = slicers.HealpixSlicer(nside=nside, latCol=decCol, lonCol=raCol,
                                   latLonDeg=degrees)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    for f in filterlist:
        for m in mList:
            displayDict['caption'] = 'Map of %s' % m.name
            if valueName != value:
                displayDict['caption'] += ' (%s)' % value
            displayDict['caption'] += ' for %s.' % metadata[f]
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m, slicer, sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     plotFuncs=subsetPlots,
                                     displayDict=displayDict,
                                     summaryMetrics=standardSummary())
            bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def glanceBatch(colmap=None, runName='opsim', nside=64,
                filternames=('u', 'g', 'r', 'i', 'z', 'y'),
                nyears=10, pairnside=32, sqlConstraint=None):
    """Generate a handy set of metrics that give a quick overview of how well a survey performed.
    This is a meta-set of other batches, to some extent.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    nside : int, opt
        The nside for the healpix slicers. Default 64.
    filternames : list of str, opt
        The list of individual filters to use when running metrics.
        Default is ('u', 'g', 'r', 'i', 'z', 'y').
        There is always an all-visits version of the metrics run as well.
    nyears : int (10)
        How many years to attempt to make hourglass plots for
    pairnside : int (32)
        nside to use for the pair fraction metric (it's slow, so nice to use lower resolution)
    sqlConstraint : str or None, opt
        Additional SQL constraint to apply to all metrics.

    Returns
    -------
    metricBundleDict
    """
    if isinstance(colmap, str):
        raise ValueError('colmap must be a dictionary, not a string')

    if colmap is None:
        colmap = ColMapDict('opsimV4')

    bundleList = []

    if sqlConstraint is None:
        sqlC = ''
    else:
        sqlC = '(%s) and' % sqlConstraint

    # One sql constraint per filter, plus the unrestricted (or base) constraint.
    sql_per_filt = [
        '%s %s="%s"' % (sqlC, colmap['filter'], filtername)
        for filtername in filternames
    ]
    sql_per_and_all_filters = [sqlConstraint] + sql_per_filt

    standardStats = standardSummary()
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Super basic things
    displayDict = {'group': 'Basic Stats', 'order': 1}
    sql = sqlConstraint
    slicer = slicers.UniSlicer()
    # Length of Survey
    metric = metrics.FullRangeMetric(col=colmap['mjd'],
                                     metricName='Length of Survey (days)')
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # Total number of filter changes
    metric = metrics.NChangesMetric(col=colmap['filter'], orderBy=colmap['mjd'])
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # Total open shutter fraction
    metric = metrics.OpenShutterFractionMetric(
        slewTimeCol=colmap['slewtime'], expTimeCol=colmap['exptime'],
        visitTimeCol=colmap['visittime'])
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # Total effective exposure time
    metric = metrics.TeffMetric(m5Col=colmap['fiveSigmaDepth'],
                                filterCol=colmap['filter'], normed=True)
    for sql in sql_per_and_all_filters:
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            displayDict=displayDict)
        bundleList.append(bundle)

    # Number of observations, all and each filter
    metric = metrics.CountMetric(col=colmap['mjd'],
                                 metricName='Number of Exposures')
    for sql in sql_per_and_all_filters:
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            displayDict=displayDict)
        bundleList.append(bundle)

    # The alt/az plots of all the pointings
    slicer = slicers.HealpixSlicer(nside=nside,
                                   latCol='zenithDistance',
                                   lonCol=colmap['az'],
                                   latLonDeg=colmap['raDecDeg'],
                                   useCache=False)
    stacker = stackers.ZenithDistStacker(altCol=colmap['alt'],
                                         degrees=colmap['raDecDeg'])
    metric = metrics.CountMetric(colmap['mjd'],
                                 metricName='Nvisits as function of Alt/Az')
    plotFuncs = [plots.LambertSkyMap()]
    for sql in sql_per_and_all_filters:
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            plotFuncs=plotFuncs,
                                            displayDict=displayDict,
                                            stackerList=[stacker])
        bundleList.append(bundle)

    # Things to check per night
    # Open Shutter per night
    displayDict = {'group': 'Pointing Efficency', 'order': 2}
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
    metric = metrics.OpenShutterFractionMetric(
        slewTimeCol=colmap['slewtime'], expTimeCol=colmap['exptime'],
        visitTimeCol=colmap['visittime'])
    sql = sqlConstraint
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        summaryMetrics=standardStats,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # Number of filter changes per night
    slicer = slicers.OneDSlicer(sliceColName=colmap['night'], binsize=1)
    metric = metrics.NChangesMetric(col=colmap['filter'], orderBy=colmap['mjd'],
                                    metricName='Filter Changes')
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        summaryMetrics=standardStats,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # A few basic maps
    # Number of observations, coadded depths
    displayDict = {'group': 'Basic Maps', 'order': 3}
    slicer = slicers.HealpixSlicer(nside=nside, latCol=colmap['dec'],
                                   lonCol=colmap['ra'],
                                   latLonDeg=colmap['raDecDeg'])
    metric = metrics.CountMetric(col=colmap['mjd'])
    plotDict = {'percentileClip': 95.}
    for sql in sql_per_and_all_filters:
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            summaryMetrics=standardStats,
                                            displayDict=displayDict,
                                            plotDict=plotDict)
        bundleList.append(bundle)

    metric = metrics.Coaddm5Metric(m5Col=colmap['fiveSigmaDepth'])
    for sql in sql_per_and_all_filters:
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            summaryMetrics=standardStats,
                                            displayDict=displayDict)
        bundleList.append(bundle)

    # Checking a few basic science things
    # Maybe check astrometry, observation pairs, SN
    plotDict = {'percentileClip': 95.}
    displayDict = {'group': 'Science', 'subgroup': 'Astrometry', 'order': 4}

    stackerList = []
    stacker = stackers.ParallaxFactorStacker(raCol=colmap['ra'],
                                             decCol=colmap['dec'],
                                             degrees=colmap['raDecDeg'],
                                             dateCol=colmap['mjd'])
    stackerList.append(stacker)

    # Maybe parallax and proper motion, fraction of visits in a good pair for SS
    displayDict['caption'] = r'Parallax precision of an $r=20$ flat SED star'
    metric = metrics.ParallaxMetric(m5Col=colmap['fiveSigmaDepth'],
                                    filterCol=colmap['filter'],
                                    seeingCol=colmap['seeingGeom'])
    sql = sqlConstraint
    # Note: reuses the 'Basic Maps' healpix slicer from above.
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        plotFuncs=subsetPlots,
                                        displayDict=displayDict,
                                        stackerList=stackerList,
                                        plotDict=plotDict)
    bundleList.append(bundle)

    displayDict[
        'caption'] = r'Proper motion precision of an $r=20$ flat SED star'
    metric = metrics.ProperMotionMetric(m5Col=colmap['fiveSigmaDepth'],
                                        mjdCol=colmap['mjd'],
                                        filterCol=colmap['filter'],
                                        seeingCol=colmap['seeingGeom'])
    bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                        plotFuncs=subsetPlots,
                                        displayDict=displayDict,
                                        plotDict=plotDict)
    bundleList.append(bundle)

    # Solar system stuff
    displayDict['caption'] = 'Fraction of observations that are in pairs'
    displayDict['subgroup'] = 'Solar System'
    sql = '%s (filter="g" or filter="r" or filter="i")' % sqlC
    pairSlicer = slicers.HealpixSlicer(nside=pairnside, latCol=colmap['dec'],
                                       lonCol=colmap['ra'],
                                       latLonDeg=colmap['raDecDeg'])
    metric = metrics.PairFractionMetric(mjdCol=colmap['mjd'])
    bundle = metricBundles.MetricBundle(metric, pairSlicer, sql,
                                        plotFuncs=subsetPlots,
                                        displayDict=displayDict)
    bundleList.append(bundle)

    # stats from the note column
    if 'note' in colmap.keys():
        displayDict = {'group': 'Basic Stats', 'subgroup': 'Percent stats'}
        metric = metrics.StringCountMetric(col=colmap['note'],
                                           percent=True,
                                           metricName='Percents')
        sql = ''
        slicer = slicers.UniSlicer()
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            displayDict=displayDict)
        bundleList.append(bundle)
        displayDict['subgroup'] = 'Count Stats'
        metric = metrics.StringCountMetric(col=colmap['note'],
                                           metricName='Counts')
        bundle = metricBundles.MetricBundle(metric, slicer, sql,
                                            displayDict=displayDict)
        bundleList.append(bundle)

    for b in bundleList:
        b.setRunName(runName)

    # Add hourglass plots.
    hrDict = hourglassBatch(colmap=colmap, runName=runName, nyears=nyears,
                            extraSql=sqlConstraint)
    # Add basic slew stats.
    # FIX: slewDict must be bound even when slewBasics fails, otherwise the
    # bd.update(slewDict) call below raises NameError after the warning.
    slewDict = {}
    try:
        slewDict = slewBasics(colmap=colmap, runName=runName)
    except KeyError as e:
        warnings.warn(
            'Could not add slew stats: missing required key %s from colmap' %
            (e))

    bd = metricBundles.makeBundlesDictFromList(bundleList)
    bd.update(slewDict)
    bd.update(hrDict)
    return bd
def slewBasics(colmap=None, runName='opsim', sqlConstraint=None):
    """Generate a simple set of statistics about the slew times and distances.
    These slew statistics can be run on the summary or default tables.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    sqlConstraint : str or None, opt
        SQL constraint to add to metrics. (note this runs on summary table).

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')

    bundleList = []

    # Calculate basic stats on slew times. (mean/median/min/max + total).
    slicer = slicers.UniSlicer()

    metadata = 'All visits'
    # order starts at -1 because each bundle bumps it by 1 before use.
    displayDict = {
        'group': 'Slew',
        'subgroup': 'Slew Basics',
        'order': -1,
        'caption': None
    }
    # Add total number of slews.
    metric = metrics.CountMetric(colmap['slewtime'], metricName='Slew Count')
    displayDict['caption'] = 'Total number of slews recorded in summary table.'
    displayDict['order'] += 1
    bundle = mb.MetricBundle(metric, slicer, sqlConstraint, metadata=metadata,
                             displayDict=displayDict)
    bundleList.append(bundle)
    for metric in standardMetrics(colmap['slewtime']):
        displayDict['caption'] = '%s in seconds.' % (metric.name)
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sqlConstraint,
                                 metadata=metadata, displayDict=displayDict)
        bundleList.append(bundle)

    # Slew Time histogram.
    slicer = slicers.OneDSlicer(sliceColName=colmap['slewtime'], binsize=2)
    metric = metrics.CountMetric(col=colmap['slewtime'],
                                 metricName='Slew Time Histogram')
    metadata = 'All visits'
    plotDict = {'logScale': True, 'ylabel': 'Count'}
    displayDict[
        'caption'] = 'Histogram of slew times (seconds) for all visits.'
    displayDict['order'] += 1
    bundle = mb.MetricBundle(metric, slicer, sqlConstraint, metadata=metadata,
                             plotDict=plotDict, displayDict=displayDict)
    bundleList.append(bundle)

    # Slew distance histogram, if available.
    if colmap['slewdist'] is not None:
        slicer = slicers.OneDSlicer(sliceColName=colmap['slewdist'])
        metric = metrics.CountMetric(col=colmap['slewdist'],
                                     metricName='Slew Distance Histogram')
        plotDict = {'logScale': True, 'ylabel': 'Count'}
        displayDict[
            'caption'] = 'Histogram of slew distances (angle) for all visits.'
        displayDict['order'] += 1
        bundle = mb.MetricBundle(metric, slicer, sqlConstraint,
                                 metadata=metadata, plotDict=plotDict,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def save_csv_dithers(dbs_path,
                     outDir,
                     db_files_only=None,
                     rot_rand_seed=42,
                     trans_rand_seed=42,
                     print_progress=True,
                     show_diagnostic_plots=False,
                     save_plots=False):
    """
    Calculate the translational and rotational dithers for various cadences
    and save the output as a csv file.

    These dithers are largely the same as in DC1/DC2:
        - Translational dithers:
            - WFD: large random offsets (as large as 1.75 deg) applied after every visit.
            - DD: small random offsets (as large as 7 arcmin) applied after every visit.
            - Else: no dithers, so `fieldRA`, `fieldDec` are returned.
        - Rotational dithers:
            - All surveys (WFD, DD, else): random between -90, 90 degrees
              applied after every filter change.
              (Break from DC2: Some visits dont get dithered since they are
              forced outside the rotator range. See RotStacker info for details.)

    Supports OpSim V3/V4 outputs.

    Required Inputs
    ---------------
    * dbs_path: str: path to the directory that contains the .db files;
                     could have non-.db files.
    * outDir: str: path to the directory where the output should be saved.

    Optional Inputs
    ---------------
    * db_files_only: list of str: list of names of the db files to run.
                                  Default: None. Runs over all the files in db_path.
    * rot_rand_seed: int: seed for random number generator for rotational dithers.
                          Default: 42
    * trans_rand_seed: int: seed for random number generator for translational dithers.
                            Default: 42
    * print_progress: bool: set to False to not print progress. Default: True
    * show_diagnostic_plots: bool: set to True to show histogram of added dithers.
                                   Default: False
    * save_plots: bool: set to True to save the histogram for descDithers in outDir.
                        Default: False

    Saved file format
    -----------------
    .csv file with four columns:
        obsIDcol, 'descDitheredRA', 'descDitheredDec', 'descDitheredRotTelPos'
    where obsIDcol = 'observationId' for V4 outputs and 'obsHistID' for V3 outputs.

    Saved filename = descDithers_<database name>.csv
    """
    startTime_0 = time.time()
    readme = '##############################\n%s' % (datetime.date.isoformat(
        datetime.date.today()))
    readme += '\nRunning with lsst.sims.maf.__version__: %s' % lsst.sims.maf.__version__
    readme += '\n\nsave_csv_dithers run:\ndbs_path= %s\n' % dbs_path
    # BUGFIX: the readme entries below were missing '\n' separators (and one
    # contained '\s', a non-escape, where '\n' was intended), producing
    # run-together readme text like 'outDir: <dir>db_files_only: ...'.
    readme += 'outDir: %s\n' % outDir
    readme += 'db_files_only: %s\n' % db_files_only
    readme += 'rot_rand_seed=%s\ntrans_rand_seed=%s\n' % (rot_rand_seed,
                                                          trans_rand_seed)
    readme += 'print_progress=%s\nshow_diagnostic_plots=%s\n' % (
        print_progress, show_diagnostic_plots)

    # Find the database files to run on.
    dbfiles = [f for f in os.listdir(dbs_path) if f.endswith('db')]
    if print_progress:
        print('Found files: %s\n' % dbfiles)

    if db_files_only is not None:
        dbfiles = [f for f in dbfiles if f in db_files_only]  # restrict to requested files
    readme += '\nReading for files: %s\n\n' % dbfiles
    if print_progress and db_files_only is not None:
        print('Running over: %s\n' % dbfiles)

    for i, dbfile in enumerate(dbfiles):  # loop over all the db files
        startTime = time.time()
        # The full header goes into the readme only for the first database.
        if (i != 0):
            readme = ''
        readme += '%s' % dbfile
        if print_progress:
            print('Starting: %s\n' % dbfile)

        opsdb = db.OpsimDatabase('%s/%s' %
                                 (dbs_path, dbfile))  # connect to the database

        # specify the column names to get from the db file
        colnames = [
            'proposalId', 'observationId', 'fieldRA', 'fieldDec', 'rotTelPos'
        ]
        propIDcol, obsIDcol = 'proposalId', 'observationId'
        if (opsdb.opsimVersion == 'V3'):
            # V3 outputs have somewhat different column names
            colnames = [
                'propID', 'obsHistID', 'fieldRA', 'fieldDec', 'rotTelPos'
            ]
            propIDcol, obsIDcol = 'propID', 'obsHistID'

        # get the data
        simdata = opsdb.fetchMetricData(colnames=colnames, sqlconstraint=None)

        # set up to run the stackers that add columns for translational and
        # rotational dithers.
        metric = metrics.PassMetric(
        )  # want to access the database; no analysis needed
        slicer = slicers.OneDSlicer(
            sliceColName='night', binsize=1,
            verbose=print_progress)  # essentially accessing all nights
        sqlconstraint = None
        resultsDb = db.ResultsDb(outDir=outDir)

        ########################################################################
        # set up metric bundle to run stackers for large translational dithers
        # + rotational dithers (WFD-style)
        if print_progress:
            print('Setting up for WFD translational dithers + rot dithers.')
        bgroup = {}
        stackerList = [
            stackers.RandomDitherFieldPerVisitStacker(
                degrees=opsdb.raDecInDeg, randomSeed=trans_rand_seed),
            stackers.RandomRotDitherPerFilterChangeStacker(
                degrees=opsdb.raDecInDeg, randomSeed=rot_rand_seed)
        ]
        bundle = metricBundles.MetricBundle(metric,
                                            slicer,
                                            sqlconstraint=sqlconstraint,
                                            stackerList=stackerList)
        bgroup['WFD'] = metricBundles.MetricBundleGroup({0: bundle},
                                                        opsdb,
                                                        outDir=outDir,
                                                        resultsDb=resultsDb,
                                                        saveEarly=False,
                                                        verbose=print_progress)
        # run the bundle
        bgroup['WFD'].runAll()

        # set up the bundle for small translational dithers (DD-style)
        if print_progress:
            print('\nSetting up for DD translational dithers.')
        chipSize = 1.75 * 2 / 15  # chip size in degrees
        chipMaxDither = chipSize / 2.
        stackerList = [
            stackers.RandomDitherFieldPerVisitStacker(
                maxDither=chipMaxDither,
                degrees=opsdb.raDecInDeg,
                randomSeed=trans_rand_seed)
        ]
        bundle = metricBundles.MetricBundle(metric,
                                            slicer,
                                            sqlconstraint=sqlconstraint,
                                            stackerList=stackerList)
        bgroup['DD'] = metricBundles.MetricBundleGroup({0: bundle},
                                                       opsdb,
                                                       outDir=outDir,
                                                       resultsDb=resultsDb,
                                                       saveEarly=False,
                                                       verbose=print_progress)
        # run the bundle
        bgroup['DD'].runAll()

        ########################################################################
        # access the relevant columns
        dithered_RA, dithered_Dec = {}, {}
        for key in bgroup:
            dithered_RA[key] = bgroup[key].simData[
                'randomDitherFieldPerVisitRa']
            dithered_Dec[key] = bgroup[key].simData[
                'randomDitherFieldPerVisitDec']
        # rotational dithers come only from the WFD run (same for all visits)
        dithered_rotTelPos = bgroup['WFD'].simData[
            'randomDitherPerFilterChangeRotTelPos']

        ########################################################################
        # diagnostic plots: histograms of the added dither offsets
        if show_diagnostic_plots:
            fig, axes = plt.subplots(nrows=1, ncols=3)
            for key in bgroup:
                # ra
                axes[0].hist(dithered_RA[key] - simdata['fieldRA'],
                             label='%s dithers: delRA' % key,
                             histtype='step',
                             lw=2,
                             bins=30)
                # dec
                axes[1].hist(dithered_Dec[key] - simdata['fieldDec'],
                             label='%s dithers: delDec' % key,
                             histtype='step',
                             lw=2)
            # tel pos (key-independent, plot once)
            axes[2].hist(dithered_rotTelPos - simdata['rotTelPos'],
                         label='rot dithers: rotTelPos',
                         histtype='step',
                         lw=2)
            for ax in axes:
                ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
                ax.set_ylabel('Counts')
            axes[0].legend()
            axes[1].legend()
            if opsdb.raDecInDeg:
                unitlabel = 'degrees'
            else:
                unitlabel = 'radians'
            axes[0].set_xlabel('delRA (%s)' % unitlabel)
            axes[1].set_xlabel('delDec (%s)' % unitlabel)
            axes[2].set_xlabel('delRotTelPos (%s)' % unitlabel)
            plt.title(dbfile)
            fig.set_size_inches(20, 5)

        ########################################################################
        # initiate the final arrays as undithered fieldRA, fieldDec:
        # nonWFD, nonDDF visits should remain unchanged
        descDitheredRA = simdata['fieldRA'].copy()
        descDitheredDec = simdata['fieldDec'].copy()
        descDitheredRot = simdata['rotTelPos'].copy()

        # need to find the indices for WFD vs. DD observations since we are
        # adding different translational dithers for WFD/DDF visits + none for
        # other surveys
        propIds, propTags = opsdb.fetchPropInfo()

        # ok work with WFD visits now
        ind_WFD = np.where(simdata[propIDcol] == propTags['WFD'])[0]
        if print_progress:
            tot = len(simdata)
            print('Total visits: ', tot)
            print('propTags: ', propTags)
            print('%s WFD visits out of total %s' % (len(ind_WFD), tot))
        descDitheredRA[ind_WFD] = dithered_RA['WFD'][ind_WFD]
        descDitheredDec[ind_WFD] = dithered_Dec['WFD'][ind_WFD]

        # work with DD visits now
        ind_DD = np.where(simdata[propIDcol] == propTags['DD'])[0]
        if print_progress:
            print('%s DD visits out of total %s' % (len(ind_DD), tot))
        descDitheredRA[ind_DD] = dithered_RA['DD'][ind_DD]
        descDitheredDec[ind_DD] = dithered_Dec['DD'][ind_DD]

        # add rotational dithers to everything
        descDitheredRot = dithered_rotTelPos

        ###############################################################
        # diagnostic plots: dithered vs. undithered distributions
        if show_diagnostic_plots or save_plots:
            fig, axes = plt.subplots(nrows=1, ncols=3)
            _, bins, _ = axes[0].hist(descDitheredRA,
                                      label='descDitheredRA',
                                      histtype='step',
                                      lw=2)
            axes[0].hist(simdata['fieldRA'],
                         label='fieldRA',
                         histtype='step',
                         lw=2,
                         bins=bins)
            _, bins, _ = axes[1].hist(descDitheredDec,
                                      label='descDitheredDec',
                                      histtype='step',
                                      lw=2)
            axes[1].hist(simdata['fieldDec'],
                         label='fieldDec',
                         histtype='step',
                         lw=2,
                         bins=bins)
            _, bins, _ = axes[2].hist(descDitheredRot,
                                      label='descDitheredRot',
                                      histtype='step',
                                      lw=2)
            axes[2].hist(simdata['rotTelPos'],
                         label='rotTelPos',
                         histtype='step',
                         lw=2,
                         bins=bins)
            if opsdb.raDecInDeg:
                xlabel = 'degrees'
            else:
                xlabel = 'radians'
            for ax in axes:
                ax.legend()
                ax.set_xlabel(xlabel)
                ax.set_ylabel('Counts')
            plt.suptitle(dbfile)
            fig.set_size_inches(20, 5)
            if save_plots:
                filename = 'hist_descDithers_%s.png' % (dbfile.split('.db')[0])
                plt.savefig('%s/%s' % (outDir, filename),
                            format='png',
                            bbox_inches='tight')
                readme += '\nSaved hist for descDithers in %s.' % filename
                if print_progress:
                    print('\nSaved hist plot in %s' % filename)
            if show_diagnostic_plots:
                plt.show()
            else:
                plt.close('all')

        ###############################################################
        # save the columns as a csv file.
        d = {
            obsIDcol: simdata[obsIDcol],
            'descDitheredRA': descDitheredRA,
            'descDitheredDec': descDitheredDec,
            'descDitheredRotTelPos': descDitheredRot
        }
        filename = 'descDithers_%s.csv' % (dbfile.split('.db')[0])
        pd.DataFrame(d).to_csv('%s/%s' % (outDir, filename), index=False)

        readme += '\nSaved the dithers in %s' % filename
        readme += '\nTime taken: %.2f (min)\n\n' % (
            (time.time() - startTime) / 60.)
        if print_progress:
            print('\nSaved the dithers in %s' % filename)
            print('Time taken: %.2f (min)\n\n' % ((time.time() - startTime) /
                                                  60.))
        # append this database's notes to the running readme file
        with open('%s/readme.txt' % (outDir), 'a') as readme_file:
            readme_file.write(readme)

    # mark the end in the readme.
    with open('%s/readme.txt' % (outDir), 'a') as readme_file:
        readme_file.write('All done. Total time taken: %.2f (min)\n\n' %
                          ((time.time() - startTime_0) / 60.))
def openshutterFractions(colmap=None,
                         runName='opsim',
                         extraSql=None,
                         extraMetadata=None):
    """Evaluate open shutter fraction over the whole survey and per night.

    Parameters
    ----------
    colmap : dict, opt
        A dictionary with a mapping of column names. Default will use
        OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'night<365')
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')

    def _subgroup_label(base):
        # Fold extra metadata (preferred) or the sql constraint into the
        # subgroup label, matching the batch-wide labeling convention.
        if extraMetadata is not None:
            return extraMetadata + ' ' + base.lower()
        if extraSql is not None:
            return base + ' ' + extraSql
        return base

    def _osf_metric():
        # Build a fresh OpenShutterFraction metric (one instance per bundle).
        return metrics.OpenShutterFractionMetric(
            slewTimeCol=colmap['slewtime'],
            expTimeCol=colmap['exptime'],
            visitTimeCol=colmap['visittime'])

    group = 'Open Shutter Fraction'
    bundleList = []

    # --- Whole-survey quantities (UniSlicer) ---
    subgroup = _subgroup_label('All visits')
    displayDict = {'group': group, 'subgroup': subgroup, 'order': 0}

    # Total open shutter fraction over the survey.
    displayDict['caption'] = (
        'Total open shutter fraction over %s. ' % subgroup.lower() +
        'Does not include downtime due to weather.')
    bundleList.append(
        mb.MetricBundle(_osf_metric(),
                        slicers.UniSlicer(),
                        extraSql,
                        metadata=subgroup,
                        displayDict=displayDict))

    # Number of distinct nights with observations.
    displayDict['caption'] = (
        'Number of nights on the sky during the survey, %s.' %
        subgroup.lower())
    bundleList.append(
        mb.MetricBundle(metrics.CountUniqueMetric(colmap['night']),
                        slicers.UniSlicer(),
                        extraSql,
                        metadata=subgroup,
                        displayDict=displayDict))

    # Span of nights from first to last observation.
    displayDict['caption'] = (
        'Number of nights from start to finish of survey, %s.' %
        subgroup.lower())
    bundleList.append(
        mb.MetricBundle(metrics.FullRangeMetric(colmap['night']),
                        slicers.UniSlicer(),
                        extraSql,
                        metadata=subgroup,
                        displayDict=displayDict))

    # --- Open shutter fraction night by night (OneDSlicer) ---
    subgroup = _subgroup_label('Per night')
    displayDict = {'group': group, 'subgroup': subgroup, 'order': 0}
    displayDict['caption'] = 'Open shutter fraction %s.' % (subgroup.lower())
    displayDict['caption'] += ' This compares on-sky image time against on-sky time + slews + filter ' \
                              'changes + readout, but does not include downtime due to weather.'
    bundleList.append(
        mb.MetricBundle(_osf_metric(),
                        slicers.OneDSlicer(sliceColName=colmap['night'],
                                           binsize=1),
                        extraSql,
                        metadata=subgroup,
                        summaryMetrics=standardSummary(),
                        displayDict=displayDict))

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def plot_progress_by_band():
    """Plot LSST progress by band.

    Returns
    -------
    fig : the `matplotlib.figure.Figure`
        the figure with the plot
    axes : `list(matplotlib.axes.Axes)`
        the axes in the plot
    """
    ops_db = lsst.sims.maf.db.OpsimDatabase(SIM_DATABASE_FNAME)

    # One bin per night over the (10-year) survey.
    vector_bins = np.arange(365 * 10 + 2)
    metric = metrics.AccumulateCountMetric(col="observationStartMJD",
                                           bins=vector_bins,
                                           binCol="night")
    slicer = slicers.UniSlicer()
    bundles = {}
    # One accumulating visit count per band.
    for band in plotprep.BANDS:
        sql = f"filter = '{band}'"
        bundles[band] = metricBundles.MetricBundle(metric,
                                                   slicer,
                                                   sql,
                                                   plotDict={},
                                                   plotFuncs=[],
                                                   summaryMetrics=[])

    # Nightly mean moon phase, used to color markers along the time axis.
    sql = ""
    slicer = slicers.OneDSlicer(sliceColName="night", bins=vector_bins)
    metric = metrics.MeanMetric(col="moonPhase")
    bundles["moon"] = metricBundles.MetricBundle(metric, slicer, sql)

    # Nightly mean MJD, used as the x coordinate.
    sql = ""
    slicer = slicers.OneDSlicer(sliceColName="night", bins=vector_bins)
    metric = metrics.MeanMetric(col="observationStartMJD")
    bundles["mjd"] = metricBundles.MetricBundle(metric, slicer, sql)

    metric_group = metricBundles.MetricBundleGroup(bundles, ops_db)
    metric_group.runAll()

    fig, ax = plt.subplots()
    num_nights = NUM_NIGHTS
    mjd = bundles["mjd"].metricValues[:num_nights]
    for band in plotprep.BANDS:
        bundle = bundles[band]
        ax.plot(
            mjd,
            bundle.metricValues[0, :num_nights],
            c=plotprep.BAND_COLOR[band],
            label=band,
        )
    # Moon-phase colored dots along the bottom of the plot.
    ax.scatter(
        mjd,
        np.zeros(num_nights),
        c=bundles["moon"].metricValues[:num_nights],
        cmap="cividis",
        s=5,
    )
    ax.legend()

    # Label the x axis with calendar dates at month starts; MJD = JD - 2400000.5.
    start_date = pd.to_datetime(mjd.min() - 15 + 2400000.5,
                                unit="D",
                                origin="julian")
    end_date = pd.to_datetime(mjd.max() + 15 + 2400000.5,
                              unit="D",
                              origin="julian")
    date_seq = pd.date_range(start=start_date, end=end_date, freq="MS")
    ax.set_xticks(date_seq.to_julian_date() - 2400000.5)
    ax.set_xticklabels([str(d)[:10] for d in date_seq])
    ax.set_ylabel("Number of visits")

    return fig, ax
def metadataBasics(value,
                   colmap=None,
                   runName='opsim',
                   valueName=None,
                   groupName=None,
                   extraSql=None,
                   extraMetadata=None,
                   nside=64,
                   filterlist=('u', 'g', 'r', 'i', 'z', 'y')):
    """Calculate basic metrics on visit metadata 'value'
    (e.g. airmass, normalized airmass, seeing..).
    Calculates extended standard metrics (with unislicer) on the quantity
    (all visits and per filter), makes histogram of the value
    (all visits and per filter),

    Parameters
    ----------
    value : str
        The column name for the quantity to evaluate.
        (column name in the database or created by a stacker).
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use
        OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    valueName : str, opt
        The name of the value to be reported in the resultsDb and added
        to the metric.
        This is intended to help standardize metric comparison between
        sim versions.
        value = name as it is in the database (seeingFwhmGeom, etc).
        valueName = name to be recorded ('seeingGeom', etc.).
        Default is None, which is set to match value.
    groupName : str, opt
        The group name for this quantity in the displayDict.
        Default is the same as 'value', capitalized.
    extraSql : str, opt
        Additional constraint to add to any sql constraints
        (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").
        Default is None.
    nside : int, opt
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.
    filterlist : list of str, opt
        List of the filternames to use for "per filter" evaluation.
        Default ('u', 'g', 'r', 'i', 'z', 'y').
        If None is passed, the per-filter evaluations will be skipped.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    if valueName is None:
        valueName = value

    if groupName is None:
        groupName = valueName.capitalize()
        subgroup = extraMetadata
    else:
        groupName = groupName.capitalize()
        subgroup = valueName.capitalize()

    displayDict = {'group': groupName, 'subgroup': subgroup}

    # Build the sql constraints and matching metadata labels:
    # one entry for 'all bands' plus one per filter (if requested).
    sqlconstraints = ['']
    metadata = ['all bands']
    if filterlist is not None:
        sqlconstraints += [
            '%s = "%s"' % (colmap['filter'], f) for f in filterlist
        ]
        metadata += ['%s band' % f for f in filterlist]
    if (extraSql is not None) and (len(extraSql) > 0):
        # AND the extra constraint into each per-filter constraint.
        tmp = []
        for s in sqlconstraints:
            if len(s) == 0:
                tmp.append(extraSql)
            else:
                tmp.append('%s and (%s)' % (s, extraSql))
        sqlconstraints = tmp
        if extraMetadata is None:
            metadata = ['%s %s' % (extraSql, m) for m in metadata]
    if extraMetadata is not None:
        metadata = ['%s %s' % (extraMetadata, m) for m in metadata]

    # Summarize values over all and per filter
    # (min/mean/median/max/percentiles/outliers/rms).
    slicer = slicers.UniSlicer()
    displayDict['caption'] = None
    for sql, meta in zip(sqlconstraints, metadata):
        displayDict['order'] = -1
        for m in extendedMetrics(value, replace_colname=valueName):
            displayDict['order'] += 1
            bundle = mb.MetricBundle(m,
                                     slicer,
                                     sql,
                                     metadata=meta,
                                     displayDict=displayDict)
            bundleList.append(bundle)

    # Histogram values over all and per filter.
    for sql, meta in zip(sqlconstraints, metadata):
        displayDict['caption'] = 'Histogram of %s' % (value)
        if valueName != value:
            displayDict['caption'] += ' (%s)' % (valueName)
        displayDict['caption'] += ' for %s visits.' % (meta)
        displayDict['order'] += 1
        m = metrics.CountMetric(value, metricName='%s Histogram' % (valueName))
        slicer = slicers.OneDSlicer(sliceColName=value)
        bundle = mb.MetricBundle(m,
                                 slicer,
                                 sql,
                                 metadata=meta,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Make maps of min/median/max for all and per filter, per RA/Dec,
    # with standard summary stats.
    # BUGFIX: the docstring promises these are skipped when nside is None,
    # but the code previously built the HealpixSlicer unconditionally.
    if nside is not None:
        mList = []
        mList.append(
            metrics.MinMetric(value, metricName='Min %s' % (valueName)))
        mList.append(
            metrics.MedianMetric(value, metricName='Median %s' % (valueName)))
        mList.append(
            metrics.MaxMetric(value, metricName='Max %s' % (valueName)))
        slicer = slicers.HealpixSlicer(nside=nside,
                                       latCol=colmap['dec'],
                                       lonCol=colmap['ra'],
                                       latLonDeg=colmap['raDecDeg'])
        subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
        displayDict['caption'] = None
        displayDict['order'] = -1
        for sql, meta in zip(sqlconstraints, metadata):
            for m in mList:
                displayDict['order'] += 1
                bundle = mb.MetricBundle(m,
                                         slicer,
                                         sql,
                                         metadata=meta,
                                         plotFuncs=subsetPlots,
                                         displayDict=displayDict,
                                         summaryMetrics=standardSummary())
                bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def plot_progress_by_quadrant():
    """Plot LSST progress by quadrant of the sky.

    Returns
    -------
    fig : the `matplotlib.figure.Figure`
        the figure with the plot
    axes : `list(matplotlib.axes.Axes)`
        the axes in the plot
    """
    ops_db = lsst.sims.maf.db.OpsimDatabase(SIM_DATABASE_FNAME)

    # One bin per night over the (10-year) survey.
    vector_bins = np.arange(366 * 10)
    metric = metrics.AccumulateCountMetric(col="observationStartMJD",
                                           bins=vector_bins,
                                           binCol="night")
    slicer = slicers.UniSlicer()
    bundles = {}
    # One accumulating visit count per 90-degree R.A. quadrant.
    quads = np.arange(0, 360, 90)
    for quad in quads:
        sql = f"fieldRA >= {quad} and fieldRA < {quad+90}"
        bundles[quad] = metricBundles.MetricBundle(metric,
                                                   slicer,
                                                   sql,
                                                   plotDict={},
                                                   plotFuncs=[],
                                                   summaryMetrics=[])

    # Nightly mean MJD, used as the x coordinate.
    sql = ""
    slicer = slicers.OneDSlicer(sliceColName="night", bins=vector_bins)
    metric = metrics.MeanMetric(col="observationStartMJD")
    bundles["mjd"] = metricBundles.MetricBundle(metric, slicer, sql)

    metric_group = metricBundles.MetricBundleGroup(bundles, ops_db)
    metric_group.runAll()

    fig, ax = plt.subplots()
    num_nights = NUM_NIGHTS
    mjd = bundles["mjd"].metricValues[:num_nights]
    for quad in quads:
        bundle = bundles[quad]
        # BUGFIX: raw f-string — '\c' and '\l' are invalid escape sequences
        # and raised SyntaxWarning on modern Python; the string bytes are
        # unchanged.
        ax.plot(
            mjd,
            bundle.metricValues[0, :num_nights],
            label=rf"{quad}$^\circ$ $\leq$ R.A. < {quad+90}$^\circ$",
        )
    ax.legend()

    # Label the x axis with calendar dates at quarter ends; MJD = JD - 2400000.5.
    start_date = pd.to_datetime(mjd.min() - 15 + 2400000.5,
                                unit="D",
                                origin="julian")
    end_date = pd.to_datetime(mjd.max() + 15 + 2400000.5,
                              unit="D",
                              origin="julian")
    date_seq = pd.date_range(start=start_date, end=end_date, freq="Q")
    ax.set_xticks(date_seq.to_julian_date() - 2400000.5)
    ax.set_xticklabels([str(d)[:10] for d in date_seq], rotation=15)
    ax.set_ylabel("Number of visits")

    return fig, ax
bundleList.append(bundle) # UniSlicer slicer = slicers.UniSlicer() metric = metrics.MeanMetric(col='airmass') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # HealpixSlicer slicer = slicers.HealpixSlicer(nside=16) metric = metrics.MeanMetric(col='airmass', metricName='MeanAirmass_heal') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # OneDSlicer slicer = slicers.OneDSlicer(sliceColName='night', binsize=10) metric = metrics.CountMetric(col='expMJD') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # OpsimFieldSlicer slicer = slicers.OpsimFieldSlicer() metric = metrics.MeanMetric(col='airmass') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # UserPointsSlicer ra = np.arange(0, 101, 1) / 100. * np.pi dec = np.arange(0, 101, 1) / 100. * (-np.pi) slicer = slicers.UserPointsSlicer(ra=ra, dec=dec) metric = metrics.MeanMetric(col='airmass', metricName='meanAirmass_user')