Example #1
def tdcBatch(colmap=None, runName='opsim', nside=64, accuracyThreshold=0.04,
             extraSql=None, extraMetadata=None):
    # extraSql / extraMetadata are passed straight through to the MetricBundle below;
    # no other sql-constraint options are added for now.
    if colmap is None:
        colmap = ColMapDict('fbs')

    # Calculate a subset of DESC WFD-related metrics.
    displayDict = {'group': 'Strong Lensing'}
    displayDict['subgroup'] = 'Lens Time Delay'

    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    summaryMetrics = [metrics.MeanMetric(), metrics.MedianMetric(), metrics.RmsMetric()]
    # Ideally we need a better way to calculate the summary metrics for the high-accuracy area.

    slicer = slicers.HealpixSlicer(nside=nside)
    tdcMetric = metrics.TdcMetric(metricName='TDC', nightCol=colmap['night'],
                                  expTimeCol=colmap['exptime'], mjdCol=colmap['mjd'])

    bundle = mb.MetricBundle(tdcMetric, slicer, constraint=extraSql, metadata=extraMetadata,
                             displayDict=displayDict, plotFuncs=subsetPlots,
                             summaryMetrics=summaryMetrics)

    bundleList = [bundle]

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
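
A minimal driver sketch, assuming the standard lsst.sims.maf imports and placeholder names ('baseline.db', 'tdc_out'), of how a bundle dictionary like the one returned by tdcBatch is typically run:

import lsst.sims.maf.db as db
import lsst.sims.maf.metricBundles as mb

# Placeholder database and output directory -- substitute your own.
opsdb = db.OpsimDatabase('baseline.db')
resultsDb = db.ResultsDb(outDir='tdc_out')
bdict = tdcBatch(runName='baseline', nside=64)
group = mb.MetricBundleGroup(bdict, opsdb, outDir='tdc_out', resultsDb=resultsDb)
group.runAll()   # evaluate the TDC metric on the HEALpix slicer
group.plotAll()  # write the sky map and histogram plots
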
Example #2
def standardMetrics(colname, replace_colname=None):
    """A set of standard simple metrics for some quanitity. Typically would be applied with unislicer.

    Parameters
    ----------
    colname : str
        The column name to apply the metrics to.
    replace_colname : str or None, opt
        Value to replace colname with in the metricName.
        e.g. if replace_colname='' the metric name is 'Mean' instead of 'Mean Airmass';
        if replace_colname='seeingGeom' the metric name is 'Mean seeingGeom' instead of 'Mean seeingFwhmGeom'.
        Default is None, which does not alter the metric name.

    Returns
    -------
    List of configured metrics.
    """
    standardMetrics = [
        metrics.MeanMetric(colname),
        metrics.MedianMetric(colname),
        metrics.MinMetric(colname),
        metrics.MaxMetric(colname)
    ]
    if replace_colname is not None:
        for m in standardMetrics:
            if len(replace_colname) > 0:
                m.name = m.name.replace('%s' % colname, '%s' % replace_colname)
            else:
                # Remove the trailing ' colname' explicitly; rstrip would strip a
                # set of characters rather than the suffix.
                suffix = ' %s' % colname
                if m.name.endswith(suffix):
                    m.name = m.name[:-len(suffix)]
    return standardMetrics
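
A quick illustrative sketch, assuming MAF's default metric naming ('Mean <colname>', etc.), of the renaming behaviour described in the docstring:

# Hypothetical usage: names become 'Mean seeing', 'Median seeing', 'Min seeing', 'Max seeing'.
seeing_stats = standardMetrics('seeingFwhmGeom', replace_colname='seeing')
for m in seeing_stats:
    print(m.name)
# With replace_colname='' the names collapse to just 'Mean', 'Median', 'Min', 'Max'.
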
Example #3
def standardSummary():
    """A set of standard summary metrics, to calculate Mean, RMS, Median, #, Max/Min, and # 3-sigma outliers.
    """
    standardSummary = [
        metrics.MeanMetric(),
        metrics.RmsMetric(),
        metrics.MedianMetric(),
        metrics.CountMetric(),
        metrics.MaxMetric(),
        metrics.MinMetric(),
        metrics.NoutliersNsigmaMetric(metricName='N(+3Sigma)', nSigma=3),
        metrics.NoutliersNsigmaMetric(metricName='N(-3Sigma)', nSigma=-3.)
    ]
    return standardSummary
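
An illustrative sketch, assuming the usual metrics/slicers/metricBundles imports, of attaching these summary statistics to a MetricBundle:

import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as mb

# Hypothetical bundle: coadded r-band depth with the standard summaries attached.
bundle = mb.MetricBundle(metrics.Coaddm5Metric(),
                         slicers.HealpixSlicer(nside=64),
                         'filter="r"',
                         summaryMetrics=standardSummary())
# After MetricBundleGroup.runAll(), the results appear in bundle.summaryValues.
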
Example #4
def scienceRadarBatch(colmap=None,
                      runName='opsim',
                      extraSql=None,
                      extraMetadata=None,
                      nside=64,
                      benchmarkArea=18000,
                      benchmarkNvisits=825,
                      DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None, opt
        Column name map for the opsim database (default: ColMapDict('fbs')).
    runName : str, opt
        Name of the opsim run, used to label the bundles.
    extraSql : str or None, opt
        Additional sql constraint applied to all bundles.
    extraMetadata : str or None, opt
        Additional metadata string combined into each bundle's metadata.
    nside : int, opt
        Nside for the HEALpix slicers.
    benchmarkArea : float, opt
        Benchmark area (sq deg) passed to the fO batch.
    benchmarkNvisits : int, opt
        Benchmark number of visits passed to the fO batch.
    DDF : bool, opt
        If True, include the deep drilling field bundles.

    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc, TDEsAsciiMetric

    if colmap is None:
        colmap = ColMapDict('fbs')

    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []
    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(
        all=False, extraSql=extraSql, extraMetadata=extraMetadata)

    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(
            load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName,
                  colmap=colmap,
                  extraSql=extraSql,
                  extraMetadata=extraMetadata,
                  benchmarkArea=benchmarkArea,
                  benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName,
                              colmap=colmap,
                              extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName,
                               colmap=colmap,
                               extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace(
            'SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    displayDict = {
        'group': 'SRD',
        'subgroup': 'Year Coverage',
        'order': 0,
        'caption': 'Number of years with observations.'
    }
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [
            metrics.AreaSummaryMetric(area=18000,
                                      reduce_func=np.mean,
                                      decreasing=True,
                                      metricName='N Seasons (18k) %s' % f)
        ]
        bundleList.append(
            mb.MetricBundle(metric,
                            slicer,
                            filtersqls[f],
                            plotDict=plotDict,
                            metadata=filtermetadata[f],
                            displayDict=displayDict,
                            summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Cosmology
    #########################

    displayDict = {
        'group': 'Cosmology',
        'subgroup': 'Galaxy Counts',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i',
                                         redshiftBin='all',
                                         nside=nside)
    summary = [
        metrics.AreaSummaryMetric(area=18000,
                                  reduce_func=np.sum,
                                  decreasing=True,
                                  metricName='N Galaxies (18k)')
    ]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             plotDict=plotDict,
                             metadata=metadata,
                             displayDict=displayDict,
                             summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = extraSql
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'],
                             seed=42,
                             badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict['subgroup'] = 'Camera Rotator'
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            # Rebuild the caption each pass so it does not accumulate across filters and metrics.
            displayDict['caption'] = 'Kuiper statistic (0 is uniform, 1 is delta function) of the ' \
                                     f'{m.colname} for visits in {f} band.'
            bundleList.append(
                mb.MetricBundle(m,
                                slicer,
                                filtersqls[f],
                                plotDict=plotDict,
                                displayDict=displayDict,
                                summaryMetrics=standardStats,
                                plotFuncs=subsetPlots))

    # XXX--need some sort of metric for weak lensing

    #########################
    # Variables and Transients
    #########################
    displayDict = {
        'group': 'Variables/Transients',
        'subgroup': 'Periodic Stars',
        'order': 0,
        'caption': None
    }
    for period in [
            0.5,
            1,
            2,
    ]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)

            plotDict = {
                'nTicks': 3,
                'colorMin': 0,
                'colorMax': 3,
                'xMin': 0,
                'xMax': 3
            }
            metadata = combineMetadata(
                'P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates whether a periodic signal with period %.1f days could ' \
                                     'be detected for an r=%i star. Amplitudes of [1, 0.1, and 0.05] mag are ' \
                                     'tested, corresponding to metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods,
                                                  starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric,
                                     healslicer,
                                     sql,
                                     metadata=metadata,
                                     displayDict=displayDict,
                                     plotDict=plotDict,
                                     plotFuncs=subsetPlots,
                                     summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilonova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of kilonovae (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'],
                             seed=43,
                             badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             extraSql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'Fraction of TDE lightcurves that could be identified, outside of DD fields'
    detectSNR = {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5}

    # light curve parameters
    epochStart = -22
    peakEpoch = 0
    nearPeakT = 10
    postPeakT = 14  # two weeks
    nPhaseCheck = 1

    # condition parameters
    nObsTotal = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nObsPrePeak = 1
    nObsNearPeak = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nFiltersNearPeak = 3
    nObsPostPeak = 0
    nFiltersPostPeak = 2

    metric = TDEsAsciiMetric(asciifile=None,
                             detectSNR=detectSNR,
                             epochStart=epochStart,
                             peakEpoch=peakEpoch,
                             nearPeakT=nearPeakT,
                             postPeakT=postPeakT,
                             nPhaseCheck=nPhaseCheck,
                             nObsTotal=nObsTotal,
                             nObsPrePeak=nObsPrePeak,
                             nObsNearPeak=nObsNearPeak,
                             nFiltersNearPeak=nFiltersNearPeak,
                             nObsPostPeak=nObsPostPeak,
                             nFiltersPostPeak=nFiltersPostPeak)
    slicer = slicers.HealpixSlicer(nside=32)
    sql = extraSql + joiner + "note not like '%DD%'"
    md = extraMetadata
    if md is None:
        md = 'NonDD'
    else:
        md += ' NonDD'
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=standardStats,
                             plotFuncs=plotFuncs,
                             metadata=md,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################

    displayDict = {'group': 'Milky Way', 'subgroup': ''}

    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars')]
    for f in filterlist:
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with a measurement error due to crowding ' \
                                 'of less than 0.1 mag' % f
        # Configure the NstarsMetric - note 'filtername' sets the filter in which crowding is evaluated,
        # so it is matched to the per-filter sql constraint below.
        metric = metrics.NstarsMetric(crowding_error=0.1,
                                      filtername=f,
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric,
                                 slicer,
                                 filtersqls[f],
                                 runName=runName,
                                 summaryMetrics=sum_stats,
                                 plotFuncs=subsetPlots,
                                 plotDict=plotDict,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(
            Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(
            Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp],
                                                  dec=dec[goodhp],
                                                  useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(
                        metricName=survey.survey_name + ', ' + f)
                    summary = [
                        metrics.MedianMetric(metricName='Median depth ' +
                                             survey.survey_name + ', ' + f)
                    ]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % f
                    bundle = mb.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             metadata=filtermetadata[f],
                                             displayDict=displayDict,
                                             summaryMetrics=summary,
                                             plotFuncs=[],
                                             plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(
                    plcs=plasticc_models_dict['SNIa-normal'],
                    seed=42,
                    ra_cen=survey.ra,
                    dec_cen=survey.dec,
                    radius=np.radians(3.),
                    useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name +
                                         ' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric,
                                         slicer,
                                         sql,
                                         runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs,
                                         metadata=extraMetadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)

    return bundleDict
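
An illustrative call, with assumed parameter values, showing how the extraSql and extraMetadata arguments propagate through this batch:

# Hypothetical call: restrict the whole batch to the first year and skip the DDF section.
# With extraSql set, the internal 'joiner' turns per-section constraints into
# e.g. 'night < 365 and filter="i"', and extraMetadata is folded into each bundle's
# metadata via filterList / combineMetadata.
bdict = scienceRadarBatch(runName='baseline',
                          extraSql='night < 365',
                          extraMetadata='Yr1',
                          nside=64,
                          DDF=False)
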
Example #5
def makeBundleList(dbFile, runName=None, nside=64, benchmark='design',
                   lonCol='fieldRA', latCol='fieldDec', seeingCol='seeingFwhmGeom'):
    """
    Make a list of metricBundle objects that assess the scientific performance
    of an opsim run.
    """

    # List to hold everything we're going to make
    bundleList = []

    # List to hold metrics that shouldn't be saved
    noSaveBundleList = []

    # Connect to the database.
    opsimdb = db.OpsimDatabaseV4(dbFile)
    if runName is None:
        runName = os.path.basename(dbFile).replace('_sqlite.db', '')

    # Fetch the proposal ID values from the database
    propids, propTags = opsimdb.fetchPropInfo()

    # Fetch the telescope location from config
    lat, lon, height = opsimdb.fetchLatLonHeight()

    # Add metadata regarding dithering/non-dithered.
    commonname = ''.join([a for a in lonCol if a in latCol])
    if commonname == 'field':
        slicermetadata = ' (non-dithered)'
    else:
        slicermetadata = ' (%s)' % (commonname)

    # Construct a WFD SQL where clause so multiple propIDs can query by WFD:
    wfdWhere = opsimdb.createSQLWhere('WFD', propTags)
    print('#FYI: WFD "where" clause: %s' % (wfdWhere))
    ddWhere = opsimdb.createSQLWhere('DD', propTags)
    print('#FYI: DD "where" clause: %s' % (ddWhere))

    # Set up benchmark values, scaled to length of opsim run.
    runLength = opsimdb.fetchRunLength()
    if benchmark == 'requested':
        # Fetch design values for seeing/skybrightness/single visit depth.
        benchmarkVals = utils.scaleBenchmarks(runLength, benchmark='design')
        # Update nvisits with requested visits from config files.
        benchmarkVals['nvisits'] = opsimdb.fetchRequestedNvisits(propId=propTags['WFD'])
        # Calculate expected coadded depth.
        benchmarkVals['coaddedDepth'] = utils.calcCoaddedDepth(benchmarkVals['nvisits'],
                                                               benchmarkVals['singleVisitDepth'])
    elif (benchmark == 'stretch') or (benchmark == 'design'):
        # Calculate benchmarks for stretch or design.
        benchmarkVals = utils.scaleBenchmarks(runLength, benchmark=benchmark)
        benchmarkVals['coaddedDepth'] = utils.calcCoaddedDepth(benchmarkVals['nvisits'],
                                                               benchmarkVals['singleVisitDepth'])
    else:
        raise ValueError('Could not recognize benchmark value %s, use design, stretch or requested.'
                         % (benchmark))
    # Check that nvisits is not set to zero (for very short run length).
    for f in benchmarkVals['nvisits']:
        if benchmarkVals['nvisits'][f] == 0:
            print('Updating benchmark nvisits value in %s to be nonzero' % (f))
            benchmarkVals['nvisits'][f] = 1

    # Set values for min/max range of nvisits for All/WFD and DD plots. These are somewhat arbitrary.
    nvisitsRange = {}
    nvisitsRange['all'] = {'u': [20, 80], 'g': [50, 150], 'r': [100, 250],
                           'i': [100, 250], 'z': [100, 300], 'y': [100, 300]}
    nvisitsRange['DD'] = {'u': [6000, 10000], 'g': [2500, 5000], 'r': [5000, 8000],
                          'i': [5000, 8000], 'z': [7000, 10000], 'y': [5000, 8000]}
    # Scale these ranges for the runLength.
    scale = runLength / 10.0
    for prop in nvisitsRange:
        for f in nvisitsRange[prop]:
            for i in [0, 1]:
                nvisitsRange[prop][f][i] = int(np.floor(nvisitsRange[prop][f][i] * scale))

    # Filter list, and map of colors (for plots) to filters.
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    colors = {'u': 'cyan', 'g': 'g', 'r': 'y', 'i': 'r', 'z': 'm', 'y': 'k'}
    filtorder = {'u': 1, 'g': 2, 'r': 3, 'i': 4, 'z': 5, 'y': 6}

    # Easy way to run through all filters.

    # Set up a list of common summary stats
    commonSummary = [metrics.MeanMetric(), metrics.RobustRmsMetric(), metrics.MedianMetric(),
                     metrics.PercentileMetric(metricName='25th%ile', percentile=25),
                     metrics.PercentileMetric(metricName='75th%ile', percentile=75),
                     metrics.MinMetric(), metrics.MaxMetric()]
    allStats = commonSummary

    # Set up some 'group' labels
    reqgroup = 'A: Required SRD metrics'
    depthgroup = 'B: Depth per filter'
    uniformitygroup = 'C: Uniformity'
    airmassgroup = 'D: Airmass distribution'
    seeinggroup = 'E: Seeing distribution'
    transgroup = 'F: Transients'
    sngroup = 'G: SN Ia'
    altAzGroup = 'H: Alt Az'
    rangeGroup = 'I: Range of Dates'
    intergroup = 'J: Inter-Night'
    phaseGroup = 'K: Max Phase Gap'
    NEOGroup = 'L: NEO Detection'

    # Set up an object to track the metricBundles that we want to combine into merged plots.
    mergedHistDict = {}

    # Set the histogram merge function.
    mergeFunc = plots.HealpixHistogram()

    keys = ['NVisits', 'coaddm5', 'NormEffTime', 'Minseeing', 'seeingAboveLimit', 'minAirmass',
            'fracAboveAirmass']

    for key in keys:
        mergedHistDict[key] = plots.PlotBundle(plotFunc=mergeFunc)

    ##
    # Calculate the fO metrics for all proposals and WFD only.
    order = 0
    for prop in ('All prop', 'WFD only'):
        if prop == 'All prop':
            metadata = 'All Visits' + slicermetadata
            sqlconstraint = ''
        if prop == 'WFD only':
            metadata = 'WFD only' + slicermetadata
            sqlconstraint = '%s' % (wfdWhere)
        # Configure the count metric that is used for the fO calculation.
        m1 = metrics.CountMetric(col='observationStartMJD', metricName='fO')
        plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkVals['Area'],
                    'Nvisit': benchmarkVals['nvisitsTotal'], 'xMin': 0, 'xMax': 1500}
        summaryMetrics = [metrics.fOArea(nside=nside, norm=False, metricName='fOArea: Nvisits (#)',
                                         Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']),
                          metrics.fOArea(nside=nside, norm=True, metricName='fOArea: Nvisits/benchmark',
                                         Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']),
                          metrics.fONv(nside=nside, norm=False, metricName='fONv: Area (sqdeg)',
                                       Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']),
                          metrics.fONv(nside=nside, norm=True, metricName='fONv: Area/benchmark',
                                       Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal'])]
        caption = 'The fO metric evaluates the overall efficiency of observing. '
        caption += ('fOArea: Nvisits = %.1f sq degrees receive at least this many visits out of %d. '
                    % (benchmarkVals['Area'], benchmarkVals['nvisitsTotal']))
        caption += ('fONv: Area = this many square degrees out of %.1f receive at least %d visits.'
                    % (benchmarkVals['Area'], benchmarkVals['nvisitsTotal']))
        displayDict = {'group': reqgroup, 'subgroup': 'F0', 'displayOrder': order, 'caption': caption}
        order += 1
        slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)

        bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict,
                                            displayDict=displayDict, summaryMetrics=summaryMetrics,
                                            plotFuncs=[plots.FOPlot()],
                                            runName=runName, metadata=metadata)
        bundleList.append(bundle)

    ###
    # Calculate the Rapid Revisit Metrics.
    order = 0
    metadata = 'All Visits' + slicermetadata
    sqlconstraint = ''
    dTmin = 40.0  # seconds
    dTmax = 30.0*60. # seconds
    minNvisit = 100
    pixArea = float(hp.nside2pixarea(nside, degrees=True))
    scale = pixArea * hp.nside2npix(nside)
    cutoff1 = 0.15
    extraStats1 = [metrics.FracBelowMetric(cutoff=cutoff1, scale=scale, metricName='Area (sq deg)')]
    extraStats1.extend(commonSummary)
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
    m1 = metrics.RapidRevisitMetric(metricName='RapidRevisitUniformity',
                                    dTmin=dTmin / 60.0 / 60.0 / 24.0, dTmax=dTmax / 60.0 / 60.0 / 24.0,
                                    minNvisits=minNvisit)

    plotDict = {'xMin': 0, 'xMax': 1}
    summaryStats = extraStats1
    caption = 'Deviation from uniformity for short revisit timescales, between %s and %s seconds, ' % (
        dTmin, dTmax)
    caption += 'for pointings with at least %d visits in this time range. ' % (minNvisit)
    caption += 'Summary statistic "Area" below indicates the area on the sky which has a '
    caption += 'deviation from uniformity of < %.2f.' % (cutoff1)
    displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1

    dTmax = dTmax/60.0 # need time in minutes for Nrevisits metric
    m2 = metrics.NRevisitsMetric(dT=dTmax)
    plotDict = {'xMin': 0.1, 'xMax': 2000, 'logScale': True}
    cutoff2 = 800
    extraStats2 = [metrics.FracAboveMetric(cutoff=cutoff2, scale=scale, metricName='Area (sq deg)')]
    extraStats2.extend(commonSummary)
    caption = 'Number of consecutive visits with return times faster than %.1f minutes, ' % (dTmax)
    caption += 'in any filter, all proposals. '
    caption += 'Summary statistic "Area" below indicates the area on the sky which has more than '
    caption += '%d revisits within this time window.' % (cutoff2)
    summaryStats = extraStats2
    displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(m2, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    m3 = metrics.NRevisitsMetric(dT=dTmax, normed=True)
    plotDict = {'xMin': 0, 'xMax': 1, 'cbarFormat': '%.1f'}
    cutoff3 = 0.6
    extraStats3 = [metrics.FracAboveMetric(cutoff=cutoff3, scale=scale, metricName='Area (sq deg)')]
    extraStats3.extend(commonSummary)
    summaryStats = extraStats3
    caption = 'Fraction of total visits where consecutive visits have return times faster '
    caption += 'than %.1f minutes, in any filter, all proposals. ' % (dTmax)
    caption += 'Summary statistic "Area" below indicates the area on the sky where this '
    caption += 'fraction exceeds %.2f.' % (cutoff3)
    displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(m3, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1

    # And add a histogram of the time between quick revisits.
    binMin = 0
    binMax = 120.
    binsize = 3.
    bins_metric = np.arange(binMin / 60.0 / 24.0, (binMax + binsize) / 60. / 24., binsize / 60. / 24.)
    bins_plot = bins_metric * 24.0 * 60.0
    m1 = metrics.TgapsMetric(bins=bins_metric, metricName='dT visits')
    plotDict = {'bins': bins_plot, 'xlabel': 'dT (minutes)'}
    caption = ('Histogram of the time between consecutive revisits (<%.1f minutes), over entire sky.'
               % (binMax))
    displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'order': order,
                   'caption': caption}
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
    plotFunc = plots.SummaryHistogram()
    bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName,
                                        metadata=metadata, plotFuncs=[plotFunc])
    bundleList.append(bundle)
    order += 1

    ##
    # Trigonometric parallax and proper motion @ r=20 and r=24
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
    sqlconstraint = ''
    order = 0
    metric = metrics.ParallaxMetric(metricName='Parallax 20', rmag=20, seeingCol=seeingCol)
    summaryStats = allStats
    plotDict = {'cbarFormat': '%.1f', 'xMin': 0, 'xMax': 3}
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption': 'Parallax precision at r=20. (without refraction).'}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ParallaxMetric(metricName='Parallax 24', rmag=24, seeingCol=seeingCol)
    plotDict = {'cbarFormat': '%.1f', 'xMin': 0, 'xMax': 10}
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption': 'Parallax precision at r=24. (without refraction).'}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ParallaxMetric(metricName='Parallax Normed', rmag=24, normalize=True,
                                    seeingCol=seeingCol)
    plotDict = {'xMin': 0.5, 'xMax': 1.0}
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption':
                   'Normalized parallax (normalized to optimum observation cadence, 1=optimal).'}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage 20', rmag=20, seeingCol=seeingCol)
    plotDict = {}
    caption = "Parallax factor coverage for an r=20 star (0 is bad, 0.5-1 is good). "
    caption += "One expects the parallax factor coverage to vary because stars on the ecliptic "
    caption += "can be observed when they have no parallax offset while stars at the pole are always "
    caption += "offset by the full parallax offset."""
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage 24', rmag=24, seeingCol=seeingCol)
    plotDict = {}
    caption = "Parallax factor coverage for an r=24 star (0 is bad, 0.5-1 is good). "
    caption += "One expects the parallax factor coverage to vary because stars on the ecliptic "
    caption += "can be observed when they have no parallax offset while stars at the pole are always "
    caption += "offset by the full parallax offset."""
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy 20', rmag=20,
                                            seeingCol=seeingCol)
    plotDict = {}
    caption = 'Correlation between parallax offset magnitude and hour angle for an r=20 star.'
    caption += ' (0 is good, near -1 or 1 is bad).'
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy 24', rmag=24,
                                            seeingCol=seeingCol)
    plotDict = {}
    caption = 'Correlation between parallax offset magnitude and hour angle for an r=24 star.'
    caption += ' (0 is good, near -1 or 1 is bad).'
    displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1

    metric = metrics.ProperMotionMetric(metricName='Proper Motion 20', rmag=20, seeingCol=seeingCol)

    summaryStats = allStats
    plotDict = {'xMin': 0, 'xMax': 3}
    displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order,
                   'caption': 'Proper Motion precision at r=20.'}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ProperMotionMetric(rmag=24, metricName='Proper Motion 24', seeingCol=seeingCol)
    summaryStats = allStats
    plotDict = {'xMin': 0, 'xMax': 10}
    displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order,
                   'caption': 'Proper Motion precision at r=24.'}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1
    metric = metrics.ProperMotionMetric(rmag=24, normalize=True, metricName='Proper Motion Normed',
                                        seeingCol=seeingCol)
    plotDict = {'xMin': 0.2, 'xMax': 0.7}
    caption = 'Normalized proper motion at r=24. '
    caption += '(normalized to optimum observation cadence - start/end. 1=optimal).'
    displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order,
                   'caption': caption}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, summaryMetrics=summaryStats,
                                        runName=runName, metadata=metadata)
    bundleList.append(bundle)
    order += 1

    ##
    # Calculate the time uniformity in each filter, for each year.
    order = 0

    slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
    plotFuncs = [plots.TwoDMap()]
    step = 0.5
    bins = np.arange(0, 365.25 * 10 + 40, 40) - step
    metric = metrics.AccumulateUniformityMetric(bins=bins)
    plotDict = {'xlabel': 'Night (days)',
                'xextent': [bins.min() + step, bins.max() + step],
                'cbarTitle': 'Uniformity'}
    for f in filters:
        sqlconstraint = 'filter = "%s"' % (f)
        caption = 'Deviation from uniformity in %s band. ' % f
        caption += 'Northern Healpixels are at the top of the image.'
        caption += '(0=perfectly uniform, 1=perfectly nonuniform).'
        displayDict = {'group': uniformitygroup, 'subgroup': 'per night',
                       'order': filtorder[f], 'caption': caption}
        metadata = '%s band' % (f) + slicermetadata
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                            displayDict=displayDict, runName=runName, metadata=metadata,
                                            plotFuncs=plotFuncs)
        noSaveBundleList.append(bundle)

    ##
    # Depth metrics.
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
    for f in filters:
        propCaption = '%s band, all proposals %s' % (f, slicermetadata)
        sqlconstraint = 'filter = "%s"' % (f)
        metadata = '%s band' % (f) + slicermetadata
        # Number of visits.
        metric = metrics.CountMetric(col='observationStartMJD', metricName='NVisits')
        plotDict = {'xlabel': 'Number of visits',
                    'xMin': nvisitsRange['all'][f][0],
                    'xMax': nvisitsRange['all'][f][1],
                    'colorMax': nvisitsRange['all'][f][1],
                    'binsize': 5,
                    'logScale': True, 'nTicks': 4, 'colorMin': 1}
        summaryStats = allStats
        displayDict = {'group': depthgroup, 'subgroup': 'Nvisits', 'order': filtorder[f],
                       'caption': 'Number of visits in filter %s, %s.' % (f, propCaption)}
        histMerge = {'color': colors[f], 'label': '%s' % (f),
                     'binsize': 5,
                     'xMin': nvisitsRange['all'][f][0], 'xMax': nvisitsRange['all'][f][1],
                     'legendloc': 'upper right'}
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                            displayDict=displayDict, runName=runName, metadata=metadata,
                                            summaryMetrics=summaryStats)
        mergedHistDict['NVisits'].addBundle(bundle, plotDict=histMerge)
        bundleList.append(bundle)
        # Coadded depth.
        metric = metrics.Coaddm5Metric()
        plotDict = {'zp': benchmarkVals['coaddedDepth'][f], 'xMin': -0.8, 'xMax': 0.8,
                    'xlabel': 'coadded m5 - %.1f' % benchmarkVals['coaddedDepth'][f]}
        summaryStats = allStats
        histMerge = {'legendloc': 'upper right', 'color': colors[f], 'label': '%s' % f, 'binsize': .02,
                     'xlabel': 'coadded m5 - benchmark value'}
        caption = ('Coadded depth in filter %s, with %s value subtracted (%.1f), %s. '
                   % (f, benchmark, benchmarkVals['coaddedDepth'][f], propCaption))
        caption += 'More positive numbers indicate fainter limiting magnitudes.'
        displayDict = {'group': depthgroup, 'subgroup': 'Coadded Depth',
                       'order': filtorder[f], 'caption': caption}
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                            displayDict=displayDict, runName=runName, metadata=metadata,
                                            summaryMetrics=summaryStats)
        mergedHistDict['coaddm5'].addBundle(bundle, plotDict=histMerge)
        bundleList.append(bundle)
        # Effective time.
        metric = metrics.TeffMetric(metricName='Normalized Effective Time', normed=True,
                                    fiducialDepth=benchmarkVals['singleVisitDepth'])
        plotDict = {'xMin': 0.1, 'xMax': 1.1}
        summaryStats = allStats
        histMerge = {'legendloc': 'upper right', 'color': colors[f], 'label': '%s' % f, 'binsize': 0.02}
        caption = ('"Time Effective" in filter %s, calculated with fiducial single-visit depth of %s mag. '
                   % (f, benchmarkVals['singleVisitDepth'][f]))
        caption += 'Normalized by the fiducial time effective, if every observation was at '
        caption += 'the fiducial depth.'
        displayDict = {'group': depthgroup, 'subgroup': 'Time Eff.',
                       'order': filtorder[f], 'caption': caption}
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                            displayDict=displayDict, runName=runName, metadata=metadata,
                                            summaryMetrics=summaryStats)
        mergedHistDict['NormEffTime'].addBundle(bundle, plotDict=histMerge)
        bundleList.append(bundle)

    # Put in a z=0.5 Type Ia SN, based on Cambridge 2015 workshop notebook.
    # Check for 1) detection in any band, 2) detection on the rise in any band,
    # 3) good characterization
    peaks = {'uPeak': 25.9, 'gPeak': 23.6, 'rPeak': 22.6, 'iPeak': 22.7, 'zPeak': 22.7, 'yPeak': 22.8}
    peakTime = 15.
    transDuration = peakTime + 30.  # Days
    metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0,
                                     transDuration=transDuration, peakTime=peakTime,
                                     surveyDuration=runLength,
                                     metricName='SNDetection', **peaks)
    caption = 'Fraction of z=0.5 type Ia SN that are detected in any filter'
    displayDict = {'group': transgroup, 'subgroup': 'Detected', 'caption': caption}
    sqlconstraint = ''
    metadata = '' + slicermetadata
    plotDict = {}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName, metadata=metadata)
    bundleList.append(bundle)

    metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0,
                                     transDuration=transDuration, peakTime=peakTime,
                                     surveyDuration=runLength,
                                     nPrePeak=1, metricName='SNAlert', **peaks)
    caption = 'Fraction of z=0.5 type Ia SN that are detected pre-peak in any filter'
    displayDict = {'group': transgroup, 'subgroup': 'Detected on the rise', 'caption': caption}
    plotDict = {}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName, metadata=metadata)
    bundleList.append(bundle)

    metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.,
                                     transDuration=transDuration, peakTime=peakTime,
                                     surveyDuration=runLength, metricName='SNLots',
                                     nFilters=3, nPrePeak=3, nPerLC=2, **peaks)
    caption = 'Fraction of z=0.5 type Ia SN that are observed 6 times, 3 pre-peak, '
    caption += '3 post-peak, with observations in 3 filters'
    displayDict = {'group': transgroup, 'subgroup': 'Well observed', 'caption': caption}
    sqlconstraint = 'filter="r" or filter="g" or filter="i" or filter="z" '
    plotDict = {}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName, metadata=metadata)
    bundleList.append(bundle)

    # Good seeing in r/i band metrics, including in first/second years.
    order = 0
    for tcolor, tlabel, timespan in zip(['k', 'g', 'r'], ['10 years', '1 year', '2 years'],
                                        ['', ' and night<=365', ' and night<=730']):
        order += 1
        for f in (['r', 'i']):
            sqlconstraint = 'filter = "%s" %s' % (f, timespan)
            propCaption = '%s band, all proposals %s, over %s.' % (f, slicermetadata, tlabel)
            metadata = '%s band, %s' % (f, tlabel) + slicermetadata
            seeing_limit = 0.7
            airmass_limit = 1.2
            metric = metrics.MinMetric(col=seeingCol)
            summaryStats = allStats
            plotDict = {'xMin': 0.35, 'xMax': 1.5, 'color': tcolor}
            displayDict = {'group': seeinggroup, 'subgroup': 'Best Seeing',
                           'order': filtorder[f] * 100 + order,
                           'caption': 'Minimum FWHMgeom values in %s.' % (propCaption)}
            histMerge = {'label': '%s %s' % (f, tlabel), 'color': tcolor,
                         'binsize': 0.03, 'xMin': 0.35, 'xMax': 1.5, 'legendloc': 'upper right'}
            bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                                displayDict=displayDict, runName=runName, metadata=metadata,
                                                summaryMetrics=summaryStats)
            mergedHistDict['Minseeing'].addBundle(bundle, plotDict=histMerge)
            bundleList.append(bundle)

            metric = metrics.FracAboveMetric(col=seeingCol, cutoff=seeing_limit)
            summaryStats = allStats
            plotDict = {'xMin': 0, 'xMax': 1.1, 'color': tcolor}
            displayDict = {'group': seeinggroup, 'subgroup': 'Good seeing fraction',
                           'order': filtorder[f] * 100 + order,
                           'caption': 'Fraction of total images with FWHMgeom worse than %.1f arcsec, in %s'
                           % (seeing_limit, propCaption)}
            histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel),
                         'binsize': 0.05, 'legendloc': 'upper right'}
            bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                                displayDict=displayDict, runName=runName, metadata=metadata,
                                                summaryMetrics=summaryStats)
            mergedHistDict['seeingAboveLimit'].addBundle(bundle, plotDict=histMerge)
            bundleList.append(bundle)

            metric = metrics.MinMetric(col='airmass')
            plotDict = {'xMin': 1, 'xMax': 1.5, 'color': tcolor}
            summaryStats = allStats
            displayDict = {'group': airmassgroup, 'subgroup': 'Best Airmass',
                           'order': filtorder[f] * 100 + order, 'caption':
                           'Minimum airmass in %s.' % (propCaption)}
            histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel),
                         'binsize': 0.03, 'legendloc': 'upper right'}
            bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                                displayDict=displayDict, runName=runName, metadata=metadata,
                                                summaryMetrics=summaryStats)
            mergedHistDict['minAirmass'].addBundle(bundle, plotDict=histMerge)
            bundleList.append(bundle)

            metric = metrics.FracAboveMetric(col='airmass', cutoff=airmass_limit)
            plotDict = {'xMin': 0, 'xMax': 1, 'color': tcolor}
            summaryStats = allStats
            displayDict = {'group': airmassgroup, 'subgroup': 'Low airmass fraction',
                           'order': filtorder[f] * 100 + order, 'caption':
                           'Fraction of total images with airmass higher than %.2f, in %s'
                           % (airmass_limit, propCaption)}
            histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel),
                         'binsize': 0.05, 'legendloc': 'upper right'}

            bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                                displayDict=displayDict, runName=runName, metadata=metadata,
                                                summaryMetrics=summaryStats)
            mergedHistDict['fracAboveAirmass'].addBundle(bundle, plotDict=histMerge)
            bundleList.append(bundle)

    # SNe metrics from the UK workshop.
    peaks = {'uPeak': 25.9, 'gPeak': 23.6, 'rPeak': 22.6, 'iPeak': 22.7, 'zPeak': 22.7, 'yPeak': 22.8}
    peakTime = 15.
    transDuration = peakTime + 30.  # Days
    metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0,
                                     transDuration=transDuration, peakTime=peakTime,
                                     surveyDuration=runLength,
                                     metricName='SNDetection', **peaks)
    caption = 'Fraction of z=0.5 type Ia SN that are detected at any point in their light curve in any filter'
    displayDict = {'group': sngroup, 'subgroup': 'Detected', 'caption': caption}
    sqlconstraint = ''
    plotDict = {}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName)
    bundleList.append(bundle)

    metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0,
                                     transDuration=transDuration, peakTime=peakTime,
                                     surveyDuration=runLength,
                                     nPrePeak=1, metricName='SNAlert', **peaks)
    caption = 'Fraction of z=0.5 type Ia SN that are detected pre-peak in any filter'
    displayDict = {'group': sngroup, 'subgroup': 'Detected on the rise', 'caption': caption}
    plotDict = {}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName)
    bundleList.append(bundle)

    metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.,
                                     transDuration=transDuration, peakTime=peakTime,
                                     surveyDuration=runLength, metricName='SNLots',
                                     nFilters=3, nPrePeak=3, nPerLC=2, **peaks)
    caption = 'Fraction of z=0.5 type Ia SN that are observed 6 times, 3 pre-peak, '
    caption += '3 post-peak, with observations in 3 filters'
    displayDict = {'group': sngroup, 'subgroup': 'Well observed', 'caption': caption}
    sqlconstraint = 'filter="r" or filter="g" or filter="i" or filter="z" '
    plotDict = {}
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName)
    bundleList.append(bundle)

    propIDOrderDict = {}
    orderVal = 100
    for propID in propids:
        propIDOrderDict[propID] = orderVal
        orderVal += 100

    # Full range of dates:
    metric = metrics.FullRangeMetric(col='observationStartMJD')
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    caption = 'Time span of survey.'
    sqlconstraint = ''
    plotDict = {}
    displayDict = {'group': rangeGroup, 'caption': caption}

    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                                        displayDict=displayDict, runName=runName)
    bundleList.append(bundle)
    for f in filters:
        for propid in propids:
            displayDict = {'group': rangeGroup, 'subgroup': propids[propid], 'caption': caption,
                           'order': filtorder[f]}
            md = '%s, %s' % (f, propids[propid])
            sql = 'filter="%s" and proposalId=%i' % (f, propid)
            bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                                                metadata=md, plotFuncs=plotFuncs,
                                                displayDict=displayDict, runName=runName)
            bundleList.append(bundle)

    # Alt az plots
    slicer = slicers.HealpixSlicer(nside=64, latCol='zenithDistance', lonCol='azimuth', useCache=False)
    metric = metrics.CountMetric('observationStartMJD', metricName='Nvisits as function of Alt/Az')
    plotDict = {}
    plotFuncs = [plots.LambertSkyMap()]
    displayDict = {'group': altAzGroup, 'caption': 'Alt Az pointing distribution'}
    for f in filters:
        for propid in propids:
            displayDict = {'group': altAzGroup, 'subgroup': propids[propid],
                           'caption': 'Alt Az pointing distribution',
                           'order': filtorder[f]}
            md = '%s, %s' % (f, propids[propid])
            sql = 'filter="%s" and proposalId=%i' % (f, propid)
            bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                                                plotFuncs=plotFuncs, metadata=md,
                                                displayDict=displayDict, runName=runName)
            bundleList.append(bundle)

    sql = ''
    md = 'all observations'
    displayDict = {'group': altAzGroup, 'subgroup': 'All Observations',
                   'caption': 'Alt Az pointing distribution'}
    bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                                        plotFuncs=plotFuncs, metadata=md,
                                        displayDict=displayDict, runName=runName)
    bundleList.append(bundle)

    # Median inter-night gap (each and all filters)
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
    metric = metrics.InterNightGapsMetric(metricName='Median Inter-Night Gap')
    sqls = ['filter = "%s"' % f for f in filters]
    orders = [filtorder[f] for f in filters]
    orders.append(0)
    sqls.append('')
    for sql, order in zip(sqls, orders):
        displayDict = {'group': intergroup, 'subgroup': 'Median Gap', 'caption': 'Median gap between nights',
                       'order': order}
        bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, runName=runName)
        bundleList.append(bundle)

    # Max inter-night gap in r and all bands
    dslicer = slicers.HealpixSlicer(nside=nside, lonCol='ditheredRA', latCol='ditheredDec')
    metric = metrics.InterNightGapsMetric(metricName='Max Inter-Night Gap', reduceFunc=np.max)

    plotDict = {'percentileClip': 95.}
    for sql, order in zip(sqls, orders):
        displayDict = {'group': intergroup, 'subgroup': 'Max Gap', 'caption': 'Max gap between nights',
                       'order': order}
        bundle = metricBundles.MetricBundle(metric, dslicer, sql, displayDict=displayDict,
                                            plotDict=plotDict, runName=runName)
        bundleList.append(bundle)

    # largest phase gap for periods
    periods = [0.1, 1.0, 10., 100.]
    sqls = {'u': 'filter = "u"', 'r': 'filter="r"',
            'g,r,i,z': 'filter="g" or filter="r" or filter="i" or filter="z"',
            'all': ''}

    for sql in sqls:
        for period in periods:
            displayDict = {'group': phaseGroup,
                           'subgroup': 'period=%.2f days, filter=%s' % (period, sql),
                           'caption': 'Maximum phase gaps'}
            metric = metrics.PhaseGapMetric(nPeriods=1, periodMin=period, periodMax=period,
                                            metricName='PhaseGap, %.1f' % period)
            bundle = metricBundles.MetricBundle(metric, slicer, sqls[sql],
                                                displayDict=displayDict, runName=runName)
            bundleList.append(bundle)

    # NEO XY plots
    slicer = slicers.UniSlicer()
    metric = metrics.PassMetric(metricName='NEODistances')
    stacker = stackers.NEODistStacker()
    stacker2 = stackers.EclipticStacker()
    for f in filters:
        plotFunc = plots.NeoDistancePlotter(eclipMax=10., eclipMin=-10.)
        caption = 'Observations within 10 degrees of the ecliptic. Distance at which an H=22 NEO would be detected.'
        displayDict = {'group': NEOGroup, 'subgroup': 'xy', 'order': filtorder[f],
                       'caption': caption}
        plotDict = {}
        sqlconstraint = 'filter = "%s"' % (f)
        bundle = metricBundles.MetricBundle(metric, slicer,
                                            sqlconstraint, displayDict=displayDict,
                                            stackerList=[stacker, stacker2],
                                            plotDict=plotDict,
                                            plotFuncs=[plotFunc])
        noSaveBundleList.append(bundle)

    # Solar elongation
    sqls = ['filter = "%s"' % f for f in filters]
    orders = [filtorder[f] for f in filters]
    sqls.append('')
    orders.append(0)
    for sql, order in zip(sqls, orders):
        plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
        displayDict = {'group': NEOGroup, 'subgroup': 'Solar Elongation',
                       'caption': 'Median solar elongation in degrees', 'order': order}
        metric = metrics.MedianMetric('solarElong')
        slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
        bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict,
                                            plotFuncs=plotFuncs)
        bundleList.append(bundle)

        plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
        displayDict = {'group': NEOGroup, 'subgroup': 'Solar Elongation',
                       'caption': 'Minimum solar elongation in degrees', 'order': order}
        metric = metrics.MinMetric('solarElong')
        slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol)
        bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict,
                                            plotFuncs=plotFuncs)
        bundleList.append(bundle)

    return (metricBundles.makeBundlesDictFromList(bundleList), mergedHistDict,
            metricBundles.makeBundlesDictFromList(noSaveBundleList))
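
A note on usage: the bundle dictionaries returned above are only configurations; nothing is evaluated until they are handed to a MetricBundleGroup together with an OpSim database, which is exactly what the driver examples below do. A minimal sketch of that step, assuming the usual lsst.sims.maf module layout and placeholder file paths:

import lsst.sims.maf.db as db
import lsst.sims.maf.metricBundles as metricBundles

def runBundleDict(bundleDict, dbFile='opsim_sqlite.db', outDir='./metricEvals'):
    """Evaluate a dictionary of configured MetricBundles against an OpSim database."""
    opsdb = db.OpsimDatabase(dbFile)           # connection to the simulated survey pointings
    resultsDb = db.ResultsDb(outDir=outDir)    # small tracking database for the outputs
    bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb,
                                             outDir=outDir, resultsDb=resultsDb)
    bgroup.runAll()    # compute every configured metric bundle
    bgroup.plotAll()   # write the associated sky maps and histograms
    return bgroup
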
Beispiel #6
0
def go(nside=64, rmag=21., SedTemplate='flat', DoRun=False, LFilters = [], \
           LNightMax=[], nightMax=1e4, \
           CustomPlotLimits=True, \
           RunOne=False, MaxRuns=1e3, \
           SpatialClip=95., \
           seeingCol='FWHMeff', \
           sCmap='cubehelix_r', \
           checkCorrKind=False, \
           wfdPlane=True, \
           useGRIZ=False):

    # Go to the directory where the sqlite databases are held...

    # cd /Users/clarkson/Data/LSST/OpSimRuns/opsim20160411


    # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits
    # Break the specifications across lines to make subdivision easier
    
    # Subsets by time first, then by filter, finally the whole shebang

    # 2016-04-23 - replaced enigma_1189 --> minion_1016
    # 2016-04-23 - replaced ops2_1092 --> minion_1020
    
    
    # (Yes the inversion of the first two is deliberate.)
    runNames = ['minion_1016', 'minion_1020', 'minion_1020', 'minion_1016', \
                    'minion_1020', 'minion_1016', 'minion_1020', 'minion_1016', \
                    'minion_1020', 'minion_1016']
    LFilters = ['', '', '', '', \
                    'u', 'u', 'y', 'y', \
                    '', '']  
    LNightMax = [365, 365, 730, 730, \
                     1e4, 1e4, 1e4, 1e4, \
                     1e4, 1e4]

    # WIC try again, this time on the new astro_lsst_01_1004 only
    if wfdPlane:
        LFilters = ['', '', '', 'u', 'y']  
        LNightMax = [365, 730, 1e4, 1e4, 1e4]
        runNames = ['astro_lsst_01_1004' for i in range (len(LFilters)) ]

    # WIC 2016-05-01 check correlation
    if checkCorrKind:
        LFilters = ['', '']
        LNightMax = [365, 365]
        runNames = ['minion_1016', 'minion_1016']

        # Type of correlation used for HA Degen 
        # checkCorrKind = True
        useSpearmanR = [False, True]
    
    if useGRIZ:
        runNames=['minion_1016','astro_lsst_01_1004', 'minion_1020']
        LFilters = ['griz' for iRun in range(len(runNames)) ]
        #LNightMax = [1e4 for iRun in range(len(runNames)) ]
        #LNightMax = [730 for iRun in range(len(runNames)) ]
        LNightMax = [365 for iRun in range(len(runNames)) ]


    # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good
    LUpperParallax = []
    LUpperPropmotion = []


    if CustomPlotLimits:
    
        LUpperParallax = [10, 10, 10, 10, \
                              10, 10, 40, 40, \
                              3.0, 3.0 ]

    
        # For proper motion, it's a little tricky to say because the
        # regular case is so pathological for the field. Try the following:
        LUpperPropmotion = [40, 40, 5, 20, \
                                3.5, 20, 3.5, 20, \
                                0.5, 5]

        if len(runNames) < 2:
            LUpperPropmotion = [100 for i in range(len(runNames))]

    print "runAstrom.go INFO - will run the following:"
    for iSho in range(len(runNames)):
        sFilThis = ''
        # print iSho, len(LFilters)
        if iSho < len(LFilters):
            sFilThis = sqlFromFilterString(LFilters[iSho])

        print "%i: %-12s, %1s, %i, sqlFilter -- %s" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho], sFilThis)
    print "==========================="

    print "mag max = %.2f" % (rmag)
    print "---------------------------"

#    print runNames
#    if not DoRun:
#        print "Set DoRun=True to actually run this."
#        print len(LFilters), len(runNames), len(LFilters) == len(runNames)
#        return

#'kraken_1038', 'kraken_1034', 'ops2_1098']


    # nside = 64

    slicer = slicers.HealpixSlicer(nside=nside)

    # Make it so we don't bother with the silly power spectra
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # WIC - back up the plotting arguments with a default value
    plotFuncsPristine = copy.deepcopy(plotFuncs)

    # WIC - the only way this will make sense to me is if I make a
    # dictionary of plot arguments. Let's try it...
    DPlotArgs = {}
    for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']:
        DPlotArgs[plotArg] = copy.deepcopy(plotFuncs)

    if CustomPlotLimits:

        # Use the same color map for all the metrics
        for plotMetric in DPlotArgs.keys():
            DPlotArgs[plotMetric][0].defaultPlotDict['cmap'] = sCmap


        # Apply spatial clipping for all but the HADegen, for which we
        # have other limits...
        for plotMetric in ['parallax', 'propmotion', 'coverage']:
            DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip

        # Some limits common to spatial maps and histograms
        for iPl in range(0,2):
            DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True

        # NOT a loop because we might want to separate out the behavior

        # Standardized range for the histograms for new parallax metrics
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] =  1.
            
        # Standardize the sky map for the HAdegen as well.
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][0].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][0].defaultPlotDict['xMax'] =  1.


        # Standardize at least the lower bound of the histogram in
        # both the proper motion and parallax errors. Upper limit we
        # can customize with a loop.
        DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2  # should not be zero if log scale!!
        DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0.


    # WIC - try changing the plot dictionary

    if not DoRun:
        plotFuncs[0].defaultPlotDict['logScale'] = True
        print DPlotArgs['propmotion'][0].defaultPlotDict
        print DPlotArgs['propmotion'][1].defaultPlotDict
        
        return

    # The old runs have the seeing in finSeeing
    #seeingCol = 'finSeeing'

    ### UPDATE THE SEEING COLUMN
    #seeingCol = 'FWHMeff'   ## Moved up to a command-line argument


    # Use all the observations. Can change if you want a different
    # time span
    # sqlconstraint = ''

    # list of sqlconstraints now used, which gets handled within the loop.

    # run some summary stats on everything
    summaryMetrics = [metrics.MedianMetric()]

    tStart = time.time()

    # Running one, or the whole lot?
    RunMax = len(runNames)

    # allow user to set a different number (say, 2)
    if MaxRuns < RunMax and MaxRuns > 0:
        RunMax = int(MaxRuns)

    # the following keyword overrides
    if RunOne:
        RunMax = 1

    print "Starting runs. RunMax = %i" % (RunMax)

    for iRun in range(RunMax):
        run = runNames[iRun][:]

    # for run in runNames:
        # Open the OpSim database
        timeStartIteration = time.time()

        # Some syntax added to test for existence of the database
        dbFil = run+'_sqlite.db'
        if not os.access(dbFil, os.R_OK):
            print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil)
            print "runAstrom.go FATAL - skipping run %s" % (run)
            continue
    
        else:
            deltaT = time.time()-tStart
            print "runAstrom.go INFO - ##################################"
            print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \
                % (run, nside, deltaT/60.)

        opsdb = db.OpsimDatabase(run+'_sqlite.db')

        # Set SQL constraint appropriate for each filter in the
        # list. If we supplied a list of filters, use it for the constraint here.
        sqlconstraint = ''
        ThisFilter = 'ugrizy'
        if len(LFilters) == len(runNames):

            # Only change the filter if one was actually supplied!
            if len(LFilters[iRun]) > 0:
                ThisFilter = LFilters[iRun]

                sqlconstraint = sqlFromFilterString(ThisFilter)

###                sqlconstraint = 'filter = "%s"' % (ThisFilter)


        # If nightmax was supplied, use it 
        ThisNightMax = int(nightMax)  # copy not view
        if len(LNightMax) == len(runNames):

            # Only update nightmax if one was given
            try:
                ThisNightMax = int(LNightMax[iRun])  # This might be redundant with the fmt statement below.
                if len(sqlconstraint) < 1:
                    sqlconstraint = 'night < %i' % (ThisNightMax)
                else:
                    sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax)
            except:
                print "runAstrom.go WARN - run %i problem with NightMax" % (iRun)
                dumdum = 1.

        # Set where the output should go - include the filter!! 
        sMag = '%.1f' % (rmag)
        sMag = sMag.replace(".","p")
        outDir = './metricEvals/%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag)

        # Ensure we'll be able to find this later on...
        if CustomPlotLimits:
            outDir = '%s_lims' % (outDir)

        # if we are testing the kind of correlation used, include that
        # in the output here.
        if checkCorrKind:
            if useSpearmanR[iRun]:
                sCorr = 'spearmanR'
            else:
                sCorr = 'pearsonR'
        
            outDir = '%s_%s' % (outDir, sCorr)

        # From this point onwards, stuff actually gets run. This is
        # the place to output what will actually happen next.
        print "runAstrom.go INFO - about to run:"
        print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside)
        print "runAstrom.go INFO - output directory will be %s" % (outDir)
        if not DoRun:
            continue

        # ensure the output directory actually exists...
        if not os.access(outDir, os.R_OK):
            print "runAstrom.go INFO - creating output directory %s" % (outDir)
            os.makedirs(outDir)


        resultsDb = db.ResultsDb(outDir=outDir)
        bundleList = []

        # WIC - to make this at least somewhat uniform, build the plot
        # functions including arguments out of our copies above.
        plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion'])
        plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax'])
        plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage'])
        plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen'])

        # if using custom plot limits, will want to include the limits
        # for proper motion and parallax too... programming a bit defensively 
        # here, including an extra check (rather than just the length of the lists 
        # above). 
        if CustomPlotLimits:
            if len(LUpperParallax) == len(runNames):
                plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun])

            if len(LUpperPropmotion) == len(runNames):
                plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun])

        # Configure the metrics
        metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs = plotFuncsParallax, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsPropmotion, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = calibrationMetrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsCoverage, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Now for the HA Degen metric. If testing the type of
        # correlation, call the metric differently here. Since the
        # argument to actually do this is only part of my github fork
        # at the moment, we use a different call. Running with default
        # arguments (checkCorrKind=False) should then work without
        # difficulty.
        metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        if checkCorrKind:
            metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate, useSpearmanR=useSpearmanR[iRun])
            print "TESTING CORRELATION KIND -- useSpearmanR", useSpearmanR[iRun]
            

        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsHAdegen, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Run everything and make plots
        bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
        bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
#        try:
        bgroup.runAll()

        print "runAstrom.go INFO - bundles took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)

#        except KeyboardInterrupt:
#            print "runAstrom.go FATAL - keyboard interrupt detected. Halting."
#            return
        bgroup.plotAll()

        print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)
        

    print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
Beispiel #7
0
def go(nside=64, rmag=20., SedTemplate='flat', DoRun=False, LFilters = [], \
           LNightMax=[], nightMax=1e4, \
           CustomPlotLimits=True, \
           RunOne=False, MaxRuns=1e3, \
           SpatialClip=95.):

    # runNames = ['enigma_1189', 'ops2_1093']

    # runNames
    #runNames = ['ops2_1092', 'kraken_1038', 'kraken_1034', 'ops2_1098']
    #runNames = ['kraken_1038', 'kraken_1034', 'ops2_1098']

    # 2015-12-23 - put kraken_1038 at the end, it seems to run
    # extremely slowly...
    runNames = ['enigma_1189', 'ops2_1098', 'kraken_1034', 'kraken_1038']

    runNames = ['ops2_1092', 'kraken_1033', 'enigma_1271']

    # UPDATE - ops2_1092 ran quite quickly on nside=32... rerun on 64

    runNames = ['ops2_1092', 'enigma_1189', 'enigma_1271', 'kraken_1038']
    
    # UPDATE 2015-12-28 -- run with single-filter choices, compare
    # enigma to ops2_1092

    # WIC 2015-12-28 -- try with single-filter and all then small subset
    runNames = ['ops2_1092', 'ops2_1092', 'ops2_1092', 'enigma_1189', 'enigma_1189', 'enigma_1189']
    LFilters = ["u", "y", '',   "u", "y", '']
    LNightMax = [1e4, 1e4, 730, 1e4, 1e4, 730]

    # WIC 2015-12-28 - 23:00 - try using a different SED template,
    # just go with single filters for now
    #
    # DO WE NEED THIS??
    

    # WIC 2015-12-28 - 22:00; much to my surprise, that took less than
    # half an hour to go all the way through. Try again, this time using slightly more filters.    
    runNames = ['ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189']
    LFilters = ['', '', 'griz', 'griz']  # (griz was not recognized)

    # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits
    # Break the specifications across lines to make subdivision easier
    
    # Subsets by time first, then by filter, finally the whole shebang
    
    # (Yes the inversion of the first two is deliberate.)
    runNames = ['enigma_1189', 'ops2_1092', 'ops2_1092', 'enigma_1189', \
                    'ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189', \
                    'ops2_1092', 'enigma_1189']
    LFilters = ['', '', '', '', \
                    'u', 'u', 'y', 'y', \
                    '', '']  
    LNightMax = [365, 365, 730, 730, \
                     1e4, 1e4, 1e4, 1e4, \
                     1e4, 1e4]

    # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good
    LUpperParallax = []
    LUpperPropmotion = []

    if CustomPlotLimits:
    
        LUpperParallax = [10, 10, 10, 10, \
                              10, 10, 40, 40, \
                              3.0, 3.0 ]
    
        # For proper motion, it's a little tricky to say because the
        # regular case is so pathological for the field. Try the following:
        LUpperPropmotion = [40, 40, 5, 20, \
                                3.5, 20, 3.5, 20, \
                                0.5, 5]


    print "runAstrom.go INFO - will run the following:"
    for iSho in range(len(runNames)):
        print "%i: %-12s, %1s, %i" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho])
    print "==========================="

#    print runNames
#    if not DoRun:
#        print "Set DoRun=True to actually run this."
#        print len(LFilters), len(runNames), len(LFilters) == len(runNames)
#        return

#'kraken_1038', 'kraken_1034', 'ops2_1098']


    # nside = 64

    slicer = slicers.HealpixSlicer(nside=nside)

    # Make it so we don't bother with the silly power spectra
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # WIC - back up the plotting arguments with a default value
    plotFuncsPristine = copy.deepcopy(plotFuncs)

    # WIC - the only way this will make sense to me is if I make a
    # dictionary of plot arguments. Let's try it...
    DPlotArgs = {}
    for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']:
        DPlotArgs[plotArg] = copy.deepcopy(plotFuncs)

    if CustomPlotLimits:

        # All spatial maps use percentile clipping
        for plotMetric in DPlotArgs.keys():
            DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip

        # Some limits common to spatial maps and histograms
        for iPl in range(0,2):
            DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True
            
        # Standardized range for the histograms for new parallax metrics
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] =  1.
            
        # Standardize at least the lower bound of the histogram in
        # both the proper motion and parallax errors. Upper limit we
        # can customize with a loop.
        DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2  # should not be zero if log scale!!
        DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0.


    # WIC - try changing the plot dictionary

    if not DoRun:
        plotFuncs[0].defaultPlotDict['logScale'] = True
        print DPlotArgs['propmotion'][0].defaultPlotDict
        print DPlotArgs['propmotion'][1].defaultPlotDict
        
        return

    # The old runs have the seeing in finSeeing
    seeingCol = 'finSeeing'

    # Try it out for a 20th mag star with a flat SED (can change mag
    # or to OBAFGKM)
    # rmag = 20. ## NOW AN ARGUMENT
    #SedTemplate='flat'

    # Use all the observations. Can change if you want a different
    # time span
    sqlconstraint = ''

    # list of sqlconstraints now used, which gets handled within the loop.

    # run some summary stats on everything
    summaryMetrics = [metrics.MedianMetric()]

    tStart = time.time()

    # Running one, or the whole lot?
    RunMax = len(runNames)

    # allow user to set a different number (say, 2)
    if MaxRuns < RunMax and MaxRuns > 0:
        RunMax = int(MaxRuns)

    # the following keyword overrides
    if RunOne:
        RunMax = 1

    print "Starting runs. RunMax = %i" % (RunMax)

    for iRun in range(RunMax):
        run = runNames[iRun][:]

    # for run in runNames:
        # Open the OpSim database
        timeStartIteration = time.time()

        # Some syntax added to test for existence of the database
        dbFil = run+'_sqlite.db'
        if not os.access(dbFil, os.R_OK):
            print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil)
            print "runAstrom.go FATAL - skipping run %s" % (run)
            continue
    
        else:
            deltaT = time.time()-tStart
            print "runAstrom.go INFO - ##################################"
            print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \
                % (run, nside, deltaT/60.)

        opsdb = db.OpsimDatabase(run+'_sqlite.db')

        # Set SQL constraint appropriate for each filter in the
        # list. If we supplied a list of filters, use it for the constraint here.
        sqlconstraint = ''
        ThisFilter = 'ugrizy'
        if len(LFilters) == len(runNames):

            # Only change the filter if one was actually supplied!
            if len(LFilters[iRun]) == 1:
                ThisFilter = LFilters[iRun]
                sqlconstraint = 'filter = "%s"' % (ThisFilter)

        # If nightmax was supplied, use it 
        ThisNightMax = int(nightMax)  # copy not view
        if len(LNightMax) == len(runNames):

            # Only update nightmax if one was given
            try:
                ThisNightMax = int(LNightMax[iRun])  # This might be redundant with the fmt statement below.
                if len(sqlconstraint) < 1:
                    sqlconstraint = 'night < %i' % (ThisNightMax)
                else:
                    sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax)
            except:
                print "runAstrom.go WARN - run %i problem with NightMax" % (iRun)
                dumdum = 1.

        # Set where the output should go - include the filter!! 
        sMag = '%.1f' % (rmag)
        sMag = sMag.replace(".","p")
        outDir = '%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag)

        # Ensure we'll be able to find this later on...
        if CustomPlotLimits:
            outDir = '%s_lims' % (outDir)

        # From this point onwards, stuff actually gets run. This is
        # the place to output what will actually happen next.
        print "runAstrom.go INFO - about to run:"
        print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside)
        print "runAstrom.go INFO - output directory will be %s" % (outDir)
        if not DoRun:
            continue

        resultsDb = db.ResultsDb(outDir=outDir)
        bundleList = []

        # WIC - to make this at least somewhat uniform, build the plot
        # functions including arguments out of our copies above.
        plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion'])
        plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax'])
        plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage'])
        plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen'])

        # if using custom plot limits, will want to include the limits
        # for proper motion and parallax too... programming a bit defensively 
        # here, including an extra check (rather than just the length of the lists 
        # above). 
        if CustomPlotLimits:
            if len(LUpperParallax) == len(runNames):
                plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun])

            if len(LUpperPropmotion) == len(runNames):
                plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun])

        # Configure the metrics
        metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs = plotFuncsParallax, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsPropmotion, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsCoverage, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsHAdegen, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Run everything and make plots
        bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
        bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
#        try:
        bgroup.runAll()

        print "runAstrom.go INFO - bundles took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)

#        except KeyboardInterrupt:
#            print "runAstrom.go FATAL - keyboard interrupt detected. Halting."
#            return
        bgroup.plotAll()

        print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)
        

    print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
Beispiel #8
0
def metadataMaps(value,
                 colmap=None,
                 runName='opsim',
                 valueName=None,
                 groupName=None,
                 extraSql=None,
                 extraMetadata=None,
                 nside=64):
    """Calculate 25/50/75 percentile values on maps across sky for a single metadata value.

    TODO: handle stackers which need configuration (degrees, in particular) more automatically.
    Currently have a hack for HA & normairmass.

    Parameters
    ----------
    value : str
        The column name for the quantity to evaluate. (column name in the database or created by a stacker).
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    valueName : str, opt
        The name of the value to be reported in the resultsDb and added to the metric.
        This is intended to help standardize metric comparison between sim versions.
        value = name as it is in the database (seeingFwhmGeom, etc).
        valueName = name to be recorded ('seeingGeom', etc.).  Default is None, which will match 'value'.
    groupName : str, opt
        The group name for this quantity in the displayDict. Default is the same as 'valueName', capitalized.
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").  Default is None.
    nside : int, opt
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    if valueName is None:
        valueName = value

    if groupName is None:
        groupName = valueName.capitalize()
        subgroup = extraMetadata
    else:
        groupName = groupName.capitalize()
        subgroup = valueName.capitalize()

    if subgroup is None:
        subgroup = 'All visits'

    displayDict = {'group': groupName, 'subgroup': subgroup}

    raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols(
        None, colmap, None)
    extraMetadata = combineMetadata(extraMetadata, ditherMeta)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(
        all=True, extraSql=extraSql, extraMetadata=extraMetadata)

    # Hack to make HA work, but really I need to account for any stackers/colmaps.
    if value == 'HA':
        stackerList = [
            stackers.HourAngleStacker(lstCol=colmap['lst'],
                                      raCol=raCol,
                                      degrees=degrees)
        ]
    elif value == 'normairmass':
        stackerList = [stackers.NormAirmassStacker(degrees=degrees)]
    else:
        stackerList = None

    # Make maps of 25/median/75 for all and per filter, per RA/Dec, with standard summary stats.
    mList = []
    mList.append(
        metrics.PercentileMetric(value,
                                 percentile=25,
                                 metricName='25thPercentile %s' % (valueName)))
    mList.append(
        metrics.MedianMetric(value, metricName='Median %s' % (valueName)))
    mList.append(
        metrics.PercentileMetric(value,
                                 percentile=75,
                                 metricName='75thPercentile %s' % (valueName)))
    slicer = slicers.HealpixSlicer(nside=nside,
                                   latCol=decCol,
                                   lonCol=raCol,
                                   latLonDeg=degrees)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    for f in filterlist:
        for m in mList:
            displayDict['caption'] = 'Map of %s' % m.name
            if valueName != value:
                displayDict['caption'] += ' (%s)' % value
            displayDict['caption'] += ' for %s.' % metadata[f]
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m,
                                     slicer,
                                     sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     plotFuncs=subsetPlots,
                                     displayDict=displayDict,
                                     summaryMetrics=standardSummary())
            bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    plotBundles = []
    return mb.makeBundlesDictFromList(bundleList), plotBundles
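
A few hypothetical calls to the function above, illustrating the stacker hack described in its docstring; these assume the same module-level imports that metadataMaps itself uses (stackers, metrics, slicers, plots, mb, and the colmap/filterList helpers):

# 'HA' and 'normairmass' get their stackers configured automatically;
# anything else is treated as a plain database (or default-stacker) column.
bdict_ha, _ = metadataMaps('HA', valueName='HourAngle')
bdict_air, _ = metadataMaps('normairmass')
# Rename 'seeingFwhmGeom' to 'seeingGeom' in the reported metric names.
bdict_seeing, _ = metadataMaps('seeingFwhmGeom', valueName='seeingGeom')
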
Beispiel #9
0
    def testMedianMetric(self):
        """Test median metric."""
        testmetric = metrics.MedianMetric('testdata')
        self.assertEqual(testmetric.run(self.dv),
                         np.median(self.dv['testdata']))
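
The test above assumes a setUp that builds self.dv; a minimal self-contained sketch of such a fixture, with hypothetical random data in a structured array:

import unittest
import numpy as np
import lsst.sims.maf.metrics as metrics

class TestSimpleMetrics(unittest.TestCase):
    def setUp(self):
        # Hypothetical fixture: a structured array with a single 'testdata' column.
        rng = np.random.RandomState(42)
        self.dv = np.zeros(100, dtype=[('testdata', 'float')])
        self.dv['testdata'] = rng.rand(100)

    def testMedianMetric(self):
        """Test median metric."""
        testmetric = metrics.MedianMetric('testdata')
        self.assertEqual(testmetric.run(self.dv),
                         np.median(self.dv['testdata']))

if __name__ == '__main__':
    unittest.main()
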
Beispiel #10
0
def scienceRadarBatch(colmap=None,
                      runName='',
                      extraSql=None,
                      extraMetadata=None,
                      nside=64,
                      benchmarkArea=18000,
                      benchmarkNvisits=825,
                      DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------

    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc

    if colmap is None:
        colmap = ColMapDict('opsimV4')

    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(
            load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    sql = extraSql
    displayDict = {
        'group': 'SRD',
        'subgroup': 'fO',
        'order': 0,
        'caption': None
    }
    metric = metrics.CountMetric(col=colmap['mjd'], metricName='fO')
    plotDict = {
        'xlabel': 'Number of Visits',
        'Asky': benchmarkArea,
        'Nvisit': benchmarkNvisits,
        'xMin': 0,
        'xMax': 1500
    }
    summaryMetrics = [
        metrics.fOArea(nside=nside,
                       norm=False,
                       metricName='fOArea',
                       Asky=benchmarkArea,
                       Nvisit=benchmarkNvisits),
        metrics.fOArea(nside=nside,
                       norm=True,
                       metricName='fOArea/benchmark',
                       Asky=benchmarkArea,
                       Nvisit=benchmarkNvisits),
        metrics.fONv(nside=nside,
                     norm=False,
                     metricName='fONv',
                     Asky=benchmarkArea,
                     Nvisit=benchmarkNvisits),
        metrics.fONv(nside=nside,
                     norm=True,
                     metricName='fONv/benchmark',
                     Asky=benchmarkArea,
                     Nvisit=benchmarkNvisits)
    ]
    caption = 'The fO metric evaluates the overall efficiency of observing. '
    caption += (
        'fONv: out of %.2f sq degrees, the area receives at least X and a median of Y visits '
        '(out of %d, if compared to benchmark). ' %
        (benchmarkArea, benchmarkNvisits))
    caption += ('fOArea: this many sq deg (out of %.2f sq deg if compared '
                'to benchmark) receives at least %d visits. ' %
                (benchmarkArea, benchmarkNvisits))
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(metric,
                             healslicer,
                             sql,
                             plotDict=plotDict,
                             displayDict=displayDict,
                             summaryMetrics=summaryMetrics,
                             plotFuncs=[plots.FOPlot()])
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict = {
        'group': 'SRD',
        'subgroup': 'Gaps',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95.}
    for filtername in 'ugrizy':
        sql = extraSql + joiner + 'filter ="%s"' % filtername
        metric = metrics.MaxGapMetric()
        summaryMetrics = [
            metrics.PercentileMetric(
                percentile=95,
                metricName='95th percentile of Max gap, %s' % filtername)
        ]
        bundle = mb.MetricBundle(metric,
                                 healslicer,
                                 sql,
                                 plotFuncs=subsetPlots,
                                 summaryMetrics=summaryMetrics,
                                 displayDict=displayDict,
                                 plotDict=plotDict)
        bundleList.append(bundle)
        displayDict['order'] += 1

    #########################
    # Solar System
    #########################

    # XXX -- may want to do Solar system separately

    # XXX--fraction of NEOs detected (assume some nominal size and albedo)
    # XXX -- fraction of MBAs detected
    # XXX -- fraction of KBOs detected
    # XXX--any others? Planet 9s? Comets? Neptune Trojans?

    #########################
    # Cosmology
    #########################

    displayDict = {
        'group': 'Cosmology',
        'subgroup': 'galaxy counts',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95.}
    sql = extraSql + joiner + 'filter="i"'
    metric = GalaxyCountsMetric_extended(filterBand='i',
                                         redshiftBin='all',
                                         nside=nside)
    summary = [
        metrics.AreaSummaryMetric(area=18000,
                                  reduce_func=np.sum,
                                  decreasing=True,
                                  metricName='N Galaxies (WFD)')
    ]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             plotDict=plotDict,
                             displayDict=displayDict,
                             summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    metadata = ''
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = ''
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'],
                             seed=42,
                             badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=metadata,
                             displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # XXX--need some sort of metric for weak lensing and camera rotation.

    #########################
    # Variables and Transients
    #########################
    displayDict = {
        'group': 'Variables and Transients',
        'subgroup': 'Periodic Stars',
        'order': 0,
        'caption': None
    }
    periods = [0.1, 0.5, 1., 2., 5., 10., 20.]  # days

    plotDict = {}
    metadata = ''
    sql = extraSql
    displayDict['caption'] = ('Measure of how well a periodic signal can be measured, '
                              'combining amplitude and phase coverage. '
                              '1 is perfect, 0 means there is no way to fit the signal.')
    for period in periods:
        summary = metrics.PercentileMetric(
            percentile=10.,
            metricName='10th %%-ile Periodic Quality, Period=%.1f days' %
            period)
        metric = metrics.PeriodicQualityMetric(
            period=period,
            starMag=20.,
            metricName='Periodic Stars, P=%.1f d' % period)
        bundle = mb.MetricBundle(metric,
                                 healslicer,
                                 sql,
                                 metadata=metadata,
                                 displayDict=displayDict,
                                 plotDict=plotDict,
                                 plotFuncs=subsetPlots,
                                 summaryMetrics=summary)
        bundleList.append(bundle)
        displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilonova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    sql = ''
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'],
                             seed=43,
                             badval=0)
    metric = Plasticc_metric(metricName='KN')
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=metadata,
                             displayDict=displayDict)
    bundleList.append(bundle)

    displayDict['order'] += 1

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################

    # Let's do the proper motion, parallax, and DCR degeneracy of a 20th mag star
    rmag = 20.
    displayDict = {
        'group': 'Milky Way',
        'subgroup': 'Astrometry',
        'order': 0,
        'caption': None
    }

    sql = extraSql
    metadata = ''
    plotDict = {'percentileClip': 95.}
    metric = metrics.ParallaxMetric(metricName='Parallax Error r=%.1f' %
                                    (rmag),
                                    rmag=rmag,
                                    seeingCol=colmap['seeingGeom'],
                                    filterCol=colmap['filter'],
                                    m5Col=colmap['fiveSigmaDepth'],
                                    normalize=False)
    summary = [
        metrics.AreaSummaryMetric(area=18000,
                                  reduce_func=np.median,
                                  decreasing=False,
                                  metricName='Median Parallax Error (WFD)')
    ]
    summary.append(
        metrics.PercentileMetric(percentile=95,
                                 metricName='95th Percentile Parallax Error'))
    bundle = mb.MetricBundle(metric,
                             healslicer,
                             sql,
                             metadata=metadata,
                             displayDict=displayDict,
                             plotDict=plotDict,
                             plotFuncs=subsetPlots,
                             summaryMetrics=summary)
    bundleList.append(bundle)
    displayDict['order'] += 1

    metric = metrics.ProperMotionMetric(
        metricName='Proper Motion Error r=%.1f' % rmag,
        rmag=rmag,
        m5Col=colmap['fiveSigmaDepth'],
        mjdCol=colmap['mjd'],
        filterCol=colmap['filter'],
        seeingCol=colmap['seeingGeom'],
        normalize=False)
    summary = [
        metrics.AreaSummaryMetric(
            area=18000,
            reduce_func=np.median,
            decreasing=False,
            metricName='Median Proper Motion Error (WFD)')
    ]
    summary.append(
        metrics.PercentileMetric(percentile=95,
                                 metricName='95th Percentile Proper Motion Error'))
    bundle = mb.MetricBundle(metric,
                             healslicer,
                             sql,
                             metadata=metadata,
                             displayDict=displayDict,
                             plotDict=plotDict,
                             summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    metric = metrics.ParallaxDcrDegenMetric(
        metricName='Parallax-DCR degeneracy r=%.1f' % (rmag),
        rmag=rmag,
        seeingCol=colmap['seeingEff'],
        filterCol=colmap['filter'],
        m5Col=colmap['fiveSigmaDepth'])
    caption = 'Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % (
        rmag)
    caption += ' (0 is good, near -1 or 1 is bad).'
    # XXX--not sure what kind of summary to do here
    summary = [metrics.MeanMetric(metricName='Mean DCR Degeneracy')]
    bundle = mb.MetricBundle(metric,
                             healslicer,
                             sql,
                             metadata=metadata,
                             displayDict=displayDict,
                             summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    for b in bundleList:
        b.setRunName(runName)

    #########################
    # DDF
    #########################
    ddf_time_bundleDicts = []
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys
        ddf_surveys = generate_dd_surveys()
        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {
            'group': 'DDF depths',
            'subgroup': None,
            'order': 0,
            'caption': None
        }

        # Run the inter and intra gaps at the center of the DDFs
        for survey in ddf_surveys:
            slicer = slicers.UserPointsSlicer(ra=np.degrees(survey.ra),
                                              dec=np.degrees(survey.dec),
                                              useCamera=False)
            ddf_time_bundleDicts.append(
                interNight(colmap=colmap,
                           slicer=slicer,
                           runName=runName,
                           nside=64,
                           extraSql='note="%s"' % survey.survey_name,
                           subgroup=survey.survey_name)[0])
            ddf_time_bundleDicts.append(
                intraNight(colmap=colmap,
                           slicer=slicer,
                           runName=runName,
                           nside=64,
                           extraSql='note="%s"' % survey.survey_name,
                           subgroup=survey.survey_name)[0])

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp],
                                                  dec=dec[goodhp],
                                                  useCamera=False)
                for filtername in ['u', 'g', 'r', 'i', 'z', 'y']:
                    metric = metrics.Coaddm5Metric(
                        metricName=survey.survey_name + ', ' + filtername)
                    summary = [
                        metrics.MedianMetric(metricName='median depth ' +
                                             survey.survey_name + ', ' +
                                             filtername)
                    ]
                    sql = extraSql + joiner + 'filter = "%s"' % filtername
                    bundle = mb.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             metadata=metadata,
                                             displayDict=displayDict,
                                             summaryMetrics=summary,
                                             plotFuncs=[])
                    bundleList.append(bundle)
                    displayDict['order'] += 1

        displayDict = {
            'group': 'DDF Transients',
            'subgroup': None,
            'order': 0,
            'caption': None
        }
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(
                    plcs=plasticc_models_dict['SNIa-normal'],
                    seed=42,
                    ra_cen=survey.ra,
                    dec_cen=survey.dec,
                    radius=np.radians(3.),
                    useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name +
                                         ' SNIa')
                sql = ''
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric,
                                         slicer,
                                         sql,
                                         runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs,
                                         metadata=metadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)

    displayDict['order'] += 1

    for b in bundleList:
        b.setRunName(runName)

    bundleDict = mb.makeBundlesDictFromList(bundleList)

    intraDict = intraNight(colmap=colmap,
                           runName=runName,
                           nside=nside,
                           extraSql=extraSql,
                           extraMetadata=extraMetadata)[0]
    interDict = interNight(colmap=colmap,
                           runName=runName,
                           nside=nside,
                           extraSql=extraSql,
                           extraMetadata=extraMetadata)[0]

    bundleDict.update(intraDict)
    bundleDict.update(interDict)
    for ddf_time in ddf_time_bundleDicts:
        bundleDict.update(ddf_time)

    return bundleDict
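
A hypothetical call to the batch defined above; the SQL cut and metadata label are illustrative only. Setting DDF=False also avoids the lsst.sims.featureScheduler import that the DDF section would otherwise pull in:

# Hypothetical invocation; run name and cuts are placeholders.
bdict = scienceRadarBatch(runName='my_run',
                          extraSql='night < 365.25',
                          extraMetadata='Year 1',
                          nside=64,
                          DDF=False)
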
Beispiel #11
0
def descWFDBatch(colmap=None, runName='opsim', nside=64,
                 bandpass='i', nfilters_needed=6, lim_ebv=0.2,
                 mag_cuts = {1: 24.75 - 0.1, 3: 25.35 - 0.1, 6: 25.72 - 0.1, 10: 26.0 - 0.1}):

    # Hide some dependencies .. we should probably bring these into MAF
    from mafContrib.lssmetrics.depthLimitedNumGalMetric import DepthLimitedNumGalMetric
    from mafContrib import (Plasticc_metric, plasticc_slicer, load_plasticc_lc)

    # The options to add additional sql constraints are removed for now.
    if colmap is None:
        colmap = ColMapDict('fbs')

    # Calculate a subset of DESC WFD-related metrics.
    displayDict = {'group': 'Cosmology'}
    subgroupCount = 1

    standardStats = standardSummary(withCount=False)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    if not isinstance(mag_cuts, dict):
        if isinstance(mag_cuts, float) or isinstance(mag_cuts, int):
            mag_cuts = {10: mag_cuts}
        else:
            raise TypeError('mag_cuts should be a float, an int, or a dict of {year: magnitude cut} values.')
    yrs = list(mag_cuts.keys())
    maxYr = max(yrs)

    # Load up the PLaSTICC light curves
    models = ['SNIa-normal']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    # One of the primary concerns for DESC WFD metrics is to add dust extinction and coadded depth limits
    # as well as to get some coverage in all 6 bandpasses.
    # These cuts figure into many of the general metrics.

    displayDict['subgroup'] = f'{subgroupCount}: Static Science'
    ## Static Science
    # Calculate the static science metrics - effective survey area, mean/median coadded depth, stdev of
    # coadded depth and the 3x2ptFoM emulator.

    dustmap = maps.DustMap(nside=nside, interp=False)
    pix_area = hp.nside2pixarea(nside, degrees=True)
    summaryMetrics = [metrics.MeanMetric(), metrics.MedianMetric(), metrics.RmsMetric(),
                      metrics.CountRatioMetric(normVal=1/pix_area, metricName='Effective Area (sq deg)')]
    bundleList = []
    displayDict['order'] = 0
    for yr_cut in yrs:
        ptsrc_lim_mag_i_band = mag_cuts[yr_cut]
        sqlconstraint = 'night <= %s' % (yr_cut * 365.25)
        sqlconstraint += ' and note not like "DD%"'
        metadata = f'{bandpass} band non-DD year {yr_cut}'
        ThreebyTwoSummary = metrics.StaticProbesFoMEmulatorMetricSimple(nside=nside, year=yr_cut,
                                                                        metricName='3x2ptFoM')
        print(colmap['fiveSigmaDepth'], colmap['filter'])
        m = metrics.ExgalM5_with_cuts(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                      lsstFilter=bandpass, nFilters=nfilters_needed,
                                      extinction_cut=lim_ebv, depth_cut=ptsrc_lim_mag_i_band)
        s = slicers.HealpixSlicer(nside=nside, useCache=False)
        caption = 'Cosmology/Static Science metrics are based on evaluating the region of '
        caption += f'the sky that meets the requirements (in year {yr_cut}): coverage in all '
        caption += f'{nfilters_needed} bands, an E(B-V) value below {lim_ebv}, and a coadded depth '
        caption += f'of at least {ptsrc_lim_mag_i_band} in {bandpass}. '
        caption += 'From there the effective survey area, coadded depth, standard deviation of the depth, '
        caption += 'and a 3x2pt static science figure of merit emulator are calculated using the '
        caption += 'dust-extincted coadded depth map (over that reduced footprint).'
        displayDict['caption'] = caption
        bundle = mb.MetricBundle(m, s, sqlconstraint, mapsList=[dustmap], metadata=metadata,
                                 summaryMetrics=summaryMetrics + [ThreebyTwoSummary],
                                 displayDict=displayDict)
        displayDict['order'] += 1
        bundleList.append(bundle)


    ## LSS Science
    # The only metric we have from LSS is the NGals metric - which is similar to the GalaxyCountsExtended
    # metric, but evaluated only on the depth/dust cuts footprint.
    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: LSS'
    displayDict['order'] = 0
    plotDict = {'nTicks': 5}
    # Have to include all filters in query, so that we check for all-band coverage.
    # Galaxy numbers calculated using 'bandpass' images only though.
    sqlconstraint = f'note not like "DD%"'
    metadata = f'{bandpass} band galaxies non-DD'
    metric = DepthLimitedNumGalMetric(m5Col=colmap['fiveSigmaDepth'], filterCol=colmap['filter'],
                                      nside=nside, filterBand=bandpass, redshiftBin='all',
                                      nfilters_needed=nfilters_needed,
                                      lim_mag_i_ptsrc=mag_cuts[maxYr], lim_ebv=lim_ebv)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict,
                             metadata=metadata, mapsList=[dustmap],
                             displayDict=displayDict, summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)


    ## WL metrics
    # Calculates the number of visits per pointing, after removing parts of the footprint due to dust/depth
    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: WL'
    displayDict['order'] = 0
    sqlconstraint = f'note not like "DD%" and filter = "{bandpass}"'
    metadata = f'{bandpass} band non-DD'
    minExpTime = 15
    m = metrics.WeakLensingNvisits(m5Col=colmap['fiveSigmaDepth'], expTimeCol=colmap['exptime'],
                                   lsstFilter=bandpass, depthlim=mag_cuts[maxYr],
                                   ebvlim=lim_ebv, min_expTime=minExpTime)
    s = slicers.HealpixSlicer(nside=nside, useCache=False)
    displayDict['caption'] = 'The number of visits per pointing, over the same reduced footprint as '
    displayDict['caption'] += f'described above. A cutoff of {minExpTime} seconds removes very short visits.'
    displayDict['order'] = 1
    bundle = mb.MetricBundle(m, s, sqlconstraint, mapsList=[dustmap], metadata=metadata,
                             summaryMetrics=standardStats, displayDict=displayDict)
    bundleList.append(bundle)

    # This probably will get replaced by @pgris's SN metrics?
    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: SNe Ia'
    displayDict['order'] = 0
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia (using PLaSTICC light curves)'
    sqlconstraint = 'note not like "DD%"'
    metadata = 'non-DD'
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint,
                             metadata=metadata, summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,  displayDict=displayDict)
    bundleList.append(bundle)

    subgroupCount += 1
    displayDict['subgroup'] = f'{subgroupCount}: Camera Rotator'
    kuiperBase = 'Kuiper statistic (0 is uniform, 1 is delta function) of the '
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(all=False,
                                                                              extraSql=None,
                                                                              extraMetadata=None)
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            # Build the full caption fresh each iteration so text does not accumulate across filters.
            displayDict['caption'] = kuiperBase + f"{m.colname} for visits in {f} band."
            bundleList.append(mb.MetricBundle(m, slicer, filtersqls[f], plotDict=plotDict,
                                              displayDict=displayDict, summaryMetrics=standardStats,
                                              plotFuncs=subsetPlots))

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
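
# Minimal usage sketch for the batch above (an assumed workflow, not part of the original example;
# the opsim database filename is hypothetical). MetricBundleGroup lives in lsst.sims.maf.metricBundles,
# imported as `mb` in these examples.
import lsst.sims.maf.db as db

opsdb = db.OpsimDatabase('baseline_10yrs.db')   # hypothetical opsim output database
resultsDb = db.ResultsDb(outDir='desc_wfd')
descDict = descWFDBatch(runName='baseline_10yrs', nside=64)
group = mb.MetricBundleGroup(descDict, opsdb, outDir='desc_wfd', resultsDb=resultsDb)
group.runAll()
group.plotAll(closefigs=True)
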
Beispiel #12
0
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default (None) uses the 'fbs' column map.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    extraSql : str or None, opt
        Additional sql constraint to apply to all of the metrics. Default None.
    extraMetadata : str or None, opt
        Additional metadata label to apply to all of the results. Default None.
    nside : int, opt
        Nside value for the healpix slicers. Default 64.
    benchmarkArea : float, opt
        Sky area (sq deg) used for the fO (SRD) benchmark. Default 18000.
    benchmarkNvisits : int, opt
        Number of visits per pointing used for the fO (SRD) benchmark. Default 825.
    DDF : bool, opt
        If True, add the DDF depth and transient metrics. Default True.

    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import (Plasticc_metric, plasticc_slicer, load_plasticc_lc,
                            TdePopMetric, generateTdePopSlicer,
                            generateMicrolensingSlicer, MicrolensingMetric)

    if colmap is None:
        colmap = ColMapDict('fbs')

    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []
    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(all=False,
                                                                              extraSql=extraSql,
                                                                              extraMetadata=extraMetadata)

    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the PLaSTICC light curves - SNIa-normal are also loaded in descWFDBatch
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata,
                  benchmarkArea=benchmarkArea, benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace('SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    displayDict = {'group': 'SRD', 'subgroup': 'Year Coverage', 'order': 0,
                   'caption': 'Number of years with observations.'}
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean, decreasing=True,
                                             metricName='N Seasons (18k) %s' % f)]
        bundleList.append(mb.MetricBundle(metric, slicer, filtersqls[f],
                                          plotDict=plotDict, metadata=filtermetadata[f],
                                          displayDict=displayDict, summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.


    #########################
    # Galaxies
    #########################

    displayDict = {'group': 'Galaxies', 'subgroup': 'Galaxy Counts', 'order': 0, 'caption': None}
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    displayDict['caption'] = 'Number of galaxies across the sky, in i band. Generally, full survey footprint.'
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             metadata=metadata,
                             displayDict=displayDict, summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1


    #########################
    # Cosmology
    #########################

    # note the desc batch does not currently take the extraSql or extraMetadata arguments.
    descBundleDict = descWFDBatch(colmap=colmap, runName=runName, nside=nside)
    for d in descBundleDict:
        bundleList.append(descBundleDict[d])

    #########################
    # Variables and Transients
    #########################
    displayDict = {'group': 'Variables/Transients',
                   'subgroup': 'Periodic Stars',
                   'order': 0, 'caption': None}
    for period in [0.5, 1, 2,]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)

            plotDict = {'nTicks': 3, 'colorMin': 0, 'colorMax': 3, 'xMin': 0, 'xMax': 3}
            metadata = combineMetadata('P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                                       extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods, starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                                     displayDict=displayDict, plotDict=plotDict,
                                     plotFuncs=subsetPlots, summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLaSTICC metrics for kilonova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of kilonovae (from PLaSTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName, summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs, metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'TDE lightcurves that could be identified'

    metric = TdePopMetric()
    slicer = generateTdePopSlicer()
    sql = ''
    plotDict = {'reduceFunc': np.sum, 'nside': 128}
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             plotDict=plotDict, plotFuncs=plotFuncs,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             displayDict=displayDict)
    bundleList.append(bundle)


    # Microlensing events
    displayDict['subgroup'] = 'Microlensing'
    displayDict['caption'] = 'Fast microlensing events'

    plotDict = {'nside': 128}
    sql = ''
    slicer = generateMicrolensingSlicer(min_crossing_time=1, max_crossing_time=10)
    metric = MicrolensingMetric(metricName='Fast Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()], metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    displayDict['caption'] = 'Slow microlensing events'
    slicer = generateMicrolensingSlicer(min_crossing_time=100, max_crossing_time=1500)
    metric = MicrolensingMetric(metricName='Slow Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()], metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    #########################
    # Milky Way
    #########################

    displayDict = {'group': 'Milky Way', 'subgroup': ''}

    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with a measurement error due to crowding ' \
                                 'of less than 0.2 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=False,
                                      seeingCol=colmap['seeingGeom'], m5Col=colmap['fiveSigmaDepth'],
                                      maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats,
                                 plotFuncs=subsetPlots, plotDict=plotDict,
                                 displayDict=displayDict, mapsList=[stellar_map])
        bundleList.append(bundle)


    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, no crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with a measurement error ' \
                                 'of less than 0.2 mag, not considering crowding' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=True,
                                      seeingCol=colmap['seeingGeom'], m5Col=colmap['fiveSigmaDepth'],
                                      metricName='Nstars_no_crowding', maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats,
                                 plotFuncs=subsetPlots, plotDict=plotDict,
                                 displayDict=displayDict, mapsList=[stellar_map])
        bundleList.append(bundle)


    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra), np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp], useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(metricName=survey.survey_name + ', ' + f)
                    summary = [metrics.MedianMetric(metricName='Median depth ' + survey.survey_name+', ' + f)]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric, slicer, sql, metadata=filtermetadata[f],
                                             displayDict=displayDict, summaryMetrics=summary,
                                             plotFuncs=[], plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                                         ra_cen=survey.ra, dec_cen=survey.dec, radius=np.radians(3.),
                                         useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name+' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs, metadata=extraMetadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)

    return bundleDict
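
# Small illustration (assumed inputs, not from the original) of the extraSql/joiner pattern used
# throughout this batch: per-metric clauses are appended to any global constraint with ' and '
# only when extraSql is non-empty.
def _compose_constraint(extraSql, clause):
    joiner = '' if extraSql == '' else ' and '
    return extraSql + joiner + clause

assert _compose_constraint('', 'filter="i"') == 'filter="i"'
assert _compose_constraint('propId = 1', 'filter="i"') == 'propId = 1 and filter="i"'
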
Beispiel #13
0
import numpy as np
import lsst.sims.maf.db as db
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.utils as utils


def coaddM5(m5Values):
    # Helper (name added here) wrapping the original fragment: coadded 5-sigma limiting magnitude
    # from per-visit m5 values, skipping masked (zero) entries.
    good = np.where(m5Values != 0.)
    return 1.25 * np.log10(np.sum(10.**(.8 * m5Values[good])))
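
# Quick numerical check of the coadd relation above (values chosen for illustration only):
# two visits of equal depth gain 1.25 * log10(2) ~ 0.376 mag, and masked (zero) entries are ignored.
assert abs(coaddM5(np.array([24.5, 24.5, 0.])) - (24.5 + 1.25 * np.log10(2))) < 1e-10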



opsdb = db.OpsimDatabase('enigma_1189_sqlite.db')
outDir = 'Flipped'
resultsDb = db.ResultsDb(outDir=outDir)

# Grab just the WFD area
propids, propTags = opsdb.fetchPropInfo()
WFDpropid = propTags['WFD']
wfdWhere = utils.createSQLWhere('WFD', propTags)

summaryStats = [metrics.MedianMetric(), metrics.RmsMetric(), metrics.RobustRmsMetric()]

filters = ['u','g']
nside = 64
bundleList = []

years = [1,3,10]
nightWheres = [' and night <= %i' % (year*365.25) for year in years]

#raftConfigs = {'A':{'rafts1':[1,3,4,6,8,10,12,14,16,18,19,21], 'rafts2':[2,5,7,9,11,13,15,17,20]},
#               'B':{'rafts1':[7,8,11,12,13,15,16,17,18,19,20,21], 'rafts2':[1,2,3,4,5,6,9,10,14]},
#               'C':{'rafts1':[2,5,6,7,9,10,11,12,13,15,16,17,20], 'rafts2':[1,3,4,8,14,18,19,21]},
#               'D':{'rafts1':[1,2,3,4,6,8,9,10,12,13,14,16,18,19,20,21], 'rafts2':[5,7,11,15,17]},
#               'E':{'rafts1':[1,2,3,4,5,7,8,9,13,14,15,17,18,19,20,21], 'rafts2':[6,10,11,12,16]},
#               'F':{'rafts1':[1,2,3,4,5,7,8,9,13,14,15,17,18,19,20,21], 'rafts2':[6,10,11,12,16]}
#}
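
# A minimal continuation sketch (an assumption, not from the source): build coadded-depth bundles
# per filter and per year cut over the WFD area, then run them through a MetricBundleGroup.
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as mb

slicer = slicers.HealpixSlicer(nside=nside)
for f in filters:
    for year, nightWhere in zip(years, nightWheres):
        sql = wfdWhere + ' and filter = "%s"' % f + nightWhere
        metric = metrics.Coaddm5Metric(metricName='CoaddM5 %s yr %i' % (f, year))
        bundleList.append(mb.MetricBundle(metric, slicer, sql, summaryMetrics=summaryStats))

bundleGroup = mb.MetricBundleGroup(mb.makeBundlesDictFromList(bundleList), opsdb,
                                   outDir=outDir, resultsDb=resultsDb)
bundleGroup.runAll()
bundleGroup.plotAll(closefigs=True)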
Beispiel #14
0
def metadataBasics(value,
                   colmap=None,
                   runName='opsim',
                   valueName=None,
                   groupName=None,
                   extraSql=None,
                   extraMetadata=None,
                   nside=64,
                   filterlist=('u', 'g', 'r', 'i', 'z', 'y')):
    """Calculate basic metrics on visit metadata 'value' (e.g. airmass, normalized airmass, seeing..).

    Calculates extended standard metrics (with unislicer) on the quantity (all visits and per filter),
    makes histograms of the value (all visits and per filter), and maps the min/median/max of the
    value across the sky, with standard summary statistics.


    Parameters
    ----------
    value : str
        The column name for the quantity to evaluate. (column name in the database or created by a stacker).
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    valueName : str, opt
        The name of the value to be reported in the resultsDb and added to the metric.
        This is intended to help standardize metric comparison between sim versions.
        value = name as it is in the database (seeingFwhmGeom, etc).
        valueName = name to be recorded ('seeingGeom', etc.).  Default is None, which is set to match value.
    groupName : str, opt
        The group name for this quantity in the displayDict. Default is the same as 'valueName', capitalized.
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").  Default is None.
    nside : int, opt
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.
    filterlist : list of str, opt
        List of the filternames to use for "per filter" evaluation. Default ('u', 'g', 'r', 'i', 'z', 'y').
        If None is passed, the per-filter evaluations will be skipped.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    if valueName is None:
        valueName = value

    if groupName is None:
        groupName = valueName.capitalize()
        subgroup = extraMetadata
    else:
        groupName = groupName.capitalize()
        subgroup = valueName.capitalize()

    displayDict = {'group': groupName, 'subgroup': subgroup}

    sqlconstraints = ['']
    metadata = ['all bands']
    if filterlist is not None:
        sqlconstraints += [
            '%s = "%s"' % (colmap['filter'], f) for f in filterlist
        ]
        metadata += ['%s band' % f for f in filterlist]
    if (extraSql is not None) and (len(extraSql) > 0):
        tmp = []
        for s in sqlconstraints:
            if len(s) == 0:
                tmp.append(extraSql)
            else:
                tmp.append('%s and (%s)' % (s, extraSql))
        sqlconstraints = tmp
        if extraMetadata is None:
            metadata = ['%s %s' % (extraSql, m) for m in metadata]
    if extraMetadata is not None:
        metadata = ['%s %s' % (extraMetadata, m) for m in metadata]

    # Summarize values over all and per filter (min/mean/median/max/percentiles/outliers/rms).
    slicer = slicers.UniSlicer()
    displayDict['caption'] = None
    for sql, meta in zip(sqlconstraints, metadata):
        displayDict['order'] = -1
        for m in extendedMetrics(value, replace_colname=valueName):
            displayDict['order'] += 1
            bundle = mb.MetricBundle(m,
                                     slicer,
                                     sql,
                                     metadata=meta,
                                     displayDict=displayDict)
            bundleList.append(bundle)

    # Histogram values over all and per filter.
    for sql, meta in zip(sqlconstraints, metadata):
        displayDict['caption'] = 'Histogram of %s' % (value)
        if valueName != value:
            displayDict['caption'] += ' (%s)' % (valueName)
        displayDict['caption'] += ' for %s visits.' % (meta)
        displayDict['order'] += 1
        m = metrics.CountMetric(value, metricName='%s Histogram' % (valueName))
        slicer = slicers.OneDSlicer(sliceColName=value)
        bundle = mb.MetricBundle(m,
                                 slicer,
                                 sql,
                                 metadata=meta,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Make maps of min/median/max for all and per filter, per RA/Dec, with standard summary stats.
    mList = []
    mList.append(metrics.MinMetric(value, metricName='Min %s' % (valueName)))
    mList.append(
        metrics.MedianMetric(value, metricName='Median %s' % (valueName)))
    mList.append(metrics.MaxMetric(value, metricName='Max %s' % (valueName)))
    slicer = slicers.HealpixSlicer(nside=nside,
                                   latCol=colmap['dec'],
                                   lonCol=colmap['ra'],
                                   latLonDeg=colmap['raDecDeg'])
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    displayDict['caption'] = None
    displayDict['order'] = -1
    for sql, meta in zip(sqlconstraints, metadata):
        for m in mList:
            displayDict['order'] += 1
            bundle = mb.MetricBundle(m,
                                     slicer,
                                     sql,
                                     metadata=meta,
                                     plotFuncs=subsetPlots,
                                     displayDict=displayDict,
                                     summaryMetrics=standardSummary())
            bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
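
# Illustration (assumed inputs, not from the original) of how the per-filter sql/metadata lists
# above are composed when an extraSql constraint is supplied and extraMetadata is None.
def _metadata_sql_lists(filterlist=('u', 'g'), extraSql='night < 365', filterCol='filter'):
    sqlconstraints = [''] + ['%s = "%s"' % (filterCol, f) for f in filterlist]
    metadata = ['all bands'] + ['%s band' % f for f in filterlist]
    sqlconstraints = [extraSql if len(s) == 0 else '%s and (%s)' % (s, extraSql) for s in sqlconstraints]
    metadata = ['%s %s' % (extraSql, m) for m in metadata]
    return sqlconstraints, metadata

# _metadata_sql_lists() ->
#  (['night < 365', 'filter = "u" and (night < 365)', 'filter = "g" and (night < 365)'],
#   ['night < 365 all bands', 'night < 365 u band', 'night < 365 g band'])
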
Beispiel #15
0
def metadataBasics(value,
                   colmap=None,
                   runName='opsim',
                   valueName=None,
                   groupName=None,
                   extraSql=None,
                   extraMetadata=None,
                   nside=64,
                   ditherStacker=None,
                   ditherkwargs=None):
    """Calculate basic metrics on visit metadata 'value' (e.g. airmass, normalized airmass, seeing..).

    Calculates extended standard metrics (with unislicer) on the quantity (all visits and per filter),
    makes histograms of the value (all visits and per filter), and maps the min/median/max of the
    value across the sky, with standard summary statistics.

    TODO: handle stackers which need configuration (degrees, in particular) more automatically.
    Currently have a hack for HA & normairmass.

    Parameters
    ----------
    value : str
        The column name for the quantity to evaluate. (column name in the database or created by a stacker).
    colmap : dict or None, opt
        A dictionary with a mapping of column names. Default will use OpsimV4 column names.
    runName : str, opt
        The name of the simulated survey. Default is "opsim".
    valueName : str, opt
        The name of the value to be reported in the resultsDb and added to the metric.
        This is intended to help standardize metric comparison between sim versions.
        value = name as it is in the database (seeingFwhmGeom, etc).
        valueName = name to be recorded ('seeingGeom', etc.).  Default is None, which will match 'value'.
    groupName : str, opt
        The group name for this quantity in the displayDict. Default is the same as 'valueName', capitalized.
    extraSql : str, opt
        Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522').
        Default None, for no additional constraints.
    extraMetadata : str, opt
        Additional metadata to add before any below (i.e. "WFD").  Default is None.
    nside : int, opt
        Nside value for healpix slicer. Default 64.
        If "None" is passed, the healpixslicer-based metrics will be skipped.
    ditherStacker: str or lsst.sims.maf.stackers.BaseDitherStacker
        Optional dither stacker to use to define ra/dec columns.
    ditherkwargs: dict, opt
        Optional dictionary of kwargs for the dither stacker.

    Returns
    -------
    metricBundleDict
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []

    if valueName is None:
        valueName = value

    if groupName is None:
        groupName = valueName.capitalize()
        subgroup = extraMetadata
    else:
        groupName = groupName.capitalize()
        subgroup = valueName.capitalize()

    if subgroup is None:
        subgroup = 'All visits'

    displayDict = {'group': groupName, 'subgroup': subgroup}

    raCol, decCol, degrees, ditherStacker, ditherMeta = radecCols(
        ditherStacker, colmap, ditherkwargs)
    extraMetadata = combineMetadata(extraMetadata, ditherMeta)
    # Set up basic all and per filter sql constraints.
    filterlist, colors, orders, sqls, metadata = filterList(
        all=True, extraSql=extraSql, extraMetadata=extraMetadata)

    # Hack to make HA work, but really I need to account for any stackers/colmaps.
    if value == 'HA':
        stackerList = [
            stackers.HourAngleStacker(lstCol=colmap['lst'],
                                      raCol=raCol,
                                      degrees=degrees)
        ]
    elif value == 'normairmass':
        stackerList = [stackers.NormAirmassStacker(degrees=degrees)]
    else:
        stackerList = None
    if ditherStacker is not None:
        if stackerList is None:
            stackerList = [ditherStacker]
        else:
            stackerList.append(ditherStacker)

    # Summarize values over all and per filter (min/mean/median/max/percentiles/outliers/rms).
    slicer = slicers.UniSlicer()
    for f in filterlist:
        for m in extendedMetrics(value, replace_colname=valueName):
            displayDict['caption'] = '%s for %s.' % (m.name, metadata[f])
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m,
                                     slicer,
                                     sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     displayDict=displayDict)
            bundleList.append(bundle)

    # Histogram values over all and per filter.
    for f in filterlist:
        displayDict['caption'] = 'Histogram of %s' % (value)
        if valueName != value:
            displayDict['caption'] += ' (%s)' % (valueName)
        displayDict['caption'] += ' for %s.' % (metadata[f])
        displayDict['order'] = orders[f]
        m = metrics.CountMetric(value, metricName='%s Histogram' % (valueName))
        slicer = slicers.OneDSlicer(sliceColName=value)
        bundle = mb.MetricBundle(m,
                                 slicer,
                                 sqls[f],
                                 stackerList=stackerList,
                                 metadata=metadata[f],
                                 displayDict=displayDict)
        bundleList.append(bundle)

    # Make maps of min/median/max for all and per filter, per RA/Dec, with standard summary stats.
    mList = []
    mList.append(metrics.MinMetric(value, metricName='Min %s' % (valueName)))
    mList.append(
        metrics.MedianMetric(value, metricName='Median %s' % (valueName)))
    mList.append(metrics.MaxMetric(value, metricName='Max %s' % (valueName)))
    slicer = slicers.HealpixSlicer(nside=nside,
                                   latCol=decCol,
                                   lonCol=raCol,
                                   latLonDeg=degrees)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    for f in filterlist:
        for m in mList:
            displayDict['caption'] = 'Map of %s' % m.name
            if valueName != value:
                displayDict['caption'] += ' (%s)' % value
            displayDict['caption'] += ' for %s.' % metadata[f]
            displayDict['order'] = orders[f]
            bundle = mb.MetricBundle(m,
                                     slicer,
                                     sqls[f],
                                     stackerList=stackerList,
                                     metadata=metadata[f],
                                     plotFuncs=subsetPlots,
                                     displayDict=displayDict,
                                     summaryMetrics=standardSummary())
            bundleList.append(bundle)

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
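
# Minimal call sketch (assumed, not from the original): basic airmass metadata metrics for the
# first year of a run, using the dither-aware variant above with its default (undithered) columns.
airmassBundles = metadataBasics('airmass', colmap=ColMapDict('opsimV4'), runName='opsim',
                                extraSql='night < 365', extraMetadata='Yr1')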