def __getOpSimMjd(self, opsim, ra, dec, fil):
    """Return the sorted MJDs of all visits of an OpSim run at (ra, dec) in one filter.

    Parameters
    ----------
    opsim : str
        Path to the OpSim sqlite database.
    ra, dec : float or array-like
        Sky position(s) passed to the UserPointsSlicer.
    fil : str
        Filter name used in the SQL constraint (e.g. 'r').

    Returns
    -------
    numpy.ndarray
        Sorted array of 'observationStartMJD' values for the matching visits.
    """
    colmn = 'observationStartMJD'
    opsdb = db.OpsimDatabase(opsim)
    # Directory where tmp files are going to be stored. TODO: eliminate this.
    outDir = 'TmpDir'
    resultsDb = db.ResultsDb(outDir=outDir)
    # PassMetric simply returns the requested columns unchanged.
    metric = metrics.PassMetric(cols=[colmn, 'fiveSigmaDepth', 'filter'])
    slicer = slicers.UserPointsSlicer(ra=ra, dec=dec)
    sqlconstraint = 'filter = \'' + fil + '\''
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint, runName='name')
    bgroup = mb.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir, resultsDb=resultsDb)
    bgroup.runAll()
    mv = bundle.metricValues[0]
    # Extract the visit dates and sort them chronologically.
    mjd = np.sort(mv[colmn])
    print(f'Num of visits {len(mjd)} {opsim}')
    return mjd
def generateTdePopSlicer(t_start=1, t_end=3652, n_events=10000, seed=42, n_files=7):
    """Build a UserPointsSlicer describing a simulated population of TDE events.

    Parameters
    ----------
    t_start : float (1)
        First night on which a TDE may peak (days).
    t_end : float (3652)
        Last night on which a TDE may peak (days).
    n_events : int (10000)
        Number of TDE events to generate.
    seed : float
        Seed forwarded to uniformSphere for the sky positions.
    n_files : int (7)
        Number of distinct TDE lightcurve templates to draw from.

    Returns
    -------
    slicers.UserPointsSlicer
        Slicer whose slicePoints carry 'peak_time' and 'file_indx' per event.
    """
    # Sky positions; `seed` is handed to uniformSphere (which presumably seeds
    # the global RNG used by the draws below — confirm against uniformSphere).
    ra, dec = uniformSphere(n_events, seed=seed)
    # One peak epoch and one lightcurve-template index per event.
    event_peaks = np.random.uniform(low=t_start, high=t_end, size=n_events)
    template_ids = np.floor(np.random.uniform(low=0, high=n_files, size=n_events)).astype(int)
    # Slicer evaluated at the generated catalog positions.
    tde_slicer = slicers.UserPointsSlicer(ra, dec, latLonDeg=True, badval=0)
    # Attach the per-event metadata the metric will read back.
    tde_slicer.slicePoints['peak_time'] = event_peaks
    tde_slicer.slicePoints['file_indx'] = template_ids
    return tde_slicer
def run_maf(dbFile, ra, dec): """Retrive min inter_night gap, and observation history with the input of database file name and arrays of RA and DEC. Note: the observing cadence returned are not ordered by date!! """ # establish connection to sqllite database file. opsimdb = db.OpsimDatabase(dbFile) # While we're in transition between opsim v3 and v4, this may be helpful: print("{dbFile} is an opsim version {version} database".format(dbFile=dbFile, version=opsimdb.opsimVersion)) if opsimdb.opsimVersion == "V3": # For v3 databases: mjdcol = 'expMJD' degrees = False cols = ['filter', 'fiveSigmaDepth', mjdcol, 'expDate'] stackerList = [] else: # For v4 and alternate scheduler databases. mjdcol = 'observationStartMJD' degrees = True cols = ['filter', 'fiveSigmaDepth', mjdcol] stackerList = [expDateStacker()] # IntraNightGapsMetric returns the gap (in days) between observations within the same night custom reduceFunc to find min gaps metric = metrics.cadenceMetrics.IntraNightGapsMetric(reduceFunc=np.amin, mjdCol=mjdcol) # PassMetric just pass all values metric_pass = metrics.simpleMetrics.PassMetric(cols=cols) # slicer for slicing pointing history slicer = slicers.UserPointsSlicer(ra, dec, lonCol='fieldRA', latCol='fieldDec', latLonDeg=degrees) # sql constrains, 3 for baseline2018a, 1 for rolling m2045 sql = '' # bundles to combine metric, slicer and sql constrain together bundle = metricBundles.MetricBundle(metric, slicer, sql) date_bundle = metricBundles.MetricBundle(metric_pass, slicer, sql, stackerList=stackerList) # create metric bundle group and returns bg = metricBundles.MetricBundleGroup( { 'sep': bundle, 'cadence': date_bundle }, opsimdb, outDir=outDir, resultsDb=resultsDb) bg.runAll() opsimdb.close() return bg
def generateKNPopSlicer(t_start=1, t_end=3652, n_events=10000, seed=42, n_files=100, d_min=10, d_max=300):
    """Generate a population of KNe events, and put the info about them
    into a UserPointSlicer object.

    Parameters
    ----------
    t_start : float (1)
        The night to start kilonova events on (days)
    t_end : float (3652)
        The final night of kilonova events
    n_events : int (10000)
        The number of kilonova events to generate
    seed : float
        The seed passed to np.random (via uniformSphere)
    n_files : int (100)
        The number of different kilonova lightcurves to use
    d_min : float or int (10)
        Minimum luminosity distance (Mpc)
    d_max : float or int (300)
        Maximum luminosity distance (Mpc)
    """

    def rndm(a, b, g, size=1):
        r"""Power-law gen for pdf(x)\propto x^{g-1} for a<=x<=b (inverse-CDF sampling)."""
        r = np.random.random(size=size)
        ag, bg = a**g, b**g
        return (ag + (bg - ag) * r)**(1. / g)

    ra, dec = uniformSphere(n_events, seed=seed)
    peak_times = np.random.uniform(low=t_start, high=t_end, size=n_events)
    file_indx = np.floor(np.random.uniform(low=0, high=n_files, size=n_events)).astype(int)
    # Define the distance.
    # NOTE(review): g=4 gives pdf ∝ d^3; a uniform-in-volume population would
    # use g=3 — confirm the intended distribution.
    distance = rndm(d_min, d_max, 4, size=n_events)
    # Set up the slicer to evaluate the catalog we just made
    slicer = slicers.UserPointsSlicer(ra, dec, latLonDeg=True, badval=0)
    # Add any additional information about each object to the slicer
    slicer.slicePoints['peak_time'] = peak_times
    slicer.slicePoints['file_indx'] = file_indx
    slicer.slicePoints['distance'] = distance
    return slicer
def get_cadence(ra, dec, b, snrLimit, nPtsLimit, filters, outDir, opsimdb, resultsDb):
    """Fetch the observation history at (ra, dec), plot per-filter depth vs. time,
    and return the per-filter times and depths.

    Parameters
    ----------
    ra, dec : float or array-like
        Sky position(s) passed to the UserPointsSlicer (dithered columns used).
    b : float
        Galactic latitude label used only in the output plot filename.
    snrLimit, nPtsLimit :
        Unused here; kept for interface compatibility with callers.
    filters : iterable of str
        Filter names to extract/plot (keys of the color map below).
    outDir : str
        MAF output directory.
    opsimdb : db.OpsimDatabase
        Open OpSim database connection.
    resultsDb : db.ResultsDb
        Results database for the bundle group.

    Returns
    -------
    (list, list)
        Per-filter lists of observation times (days since first visit) and
        five-sigma depths, in the order of `filters`.
    """
    # The pass metric just passes data straight through.
    metric = metrics.PassMetric(cols=['filter', 'fiveSigmaDepth', 'expMJD'])
    slicer = slicers.UserPointsSlicer(ra, dec, lonCol='ditheredRA', latCol='ditheredDec')
    sql = ''
    bundle = metricBundles.MetricBundle(metric, slicer, sql)
    bg = metricBundles.MetricBundleGroup({0: bundle}, opsimdb, outDir=outDir, resultsDb=resultsDb)
    bg.runAll()
    print("Plotting...")
    colors = {'u': 'cyan', 'g': 'g', 'r': 'y', 'i': 'r', 'z': 'm', 'y': 'k'}
    # Reference all times to the first visit.
    dayZero = bundle.metricValues.data[0]['expMJD'].min()
    times = []
    depths = []
    plt.clf()
    for fname in filters:
        good = np.where(bundle.metricValues.data[0]['filter'] == fname)
        times.append(bundle.metricValues.data[0]['expMJD'][good] - dayZero)
        depths.append(bundle.metricValues.data[0]['fiveSigmaDepth'][good])
        plt.scatter(bundle.metricValues.data[0]['expMJD'][good] - dayZero,
                    bundle.metricValues.data[0]['fiveSigmaDepth'][good],
                    c=colors[fname], label=fname)
    plt.xlabel('Day')
    # Raw string so the backslash in the TeX label is not treated as an escape.
    plt.ylabel(r'5$\sigma$ depth')
    plt.legend(scatterpoints=1, loc="upper left", bbox_to_anchor=(1, 1))
    plt.savefig("l45b{0}_cadence.pdf".format(int(b)))
    return times, depths
# Demo: query an OpSim database and slice the visit history at two user-defined
# sky points, printing the visit MJDs for each point.
from __future__ import print_function
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.db as db
# Connect to opsim
dbAddress = 'sqlite:///ops1_1140_sqlite.db'
oo = db.OpsimDatabase(dbAddress)
colnames = ['expMJD', 'fieldRA', 'fieldDec']
sqlconstraint = 'filter="r"'
# Get opsim simulation data (r-band visits only)
simdata = oo.fetchMetricData(colnames, sqlconstraint)
# Init the slicer, set 2 points
slicer = slicers.UserPointsSlicer(ra=[0., .1], dec=[0., -.1])
# Setup slicer (builds kdTree over the pointing history)
slicer.setupSlicer(simdata)
# Slice Point for index zero.
# NOTE(review): _sliceSimData is a private slicer API — may change between
# MAF versions; verify before reuse.
ind = slicer._sliceSimData(0)
expMJDs = simdata[ind['idxs']]['expMJD']
print('mjd for the 1st user defined point', expMJDs)
# Find the expMJDs for the 2nd point
ind = slicer._sliceSimData(1)
expMJDs = simdata[ind['idxs']]['expMJD']
print('mjd for the 2nd user defined point', expMJDs)
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD
    and the main science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None (None)
        Column-name map; defaults to ColMapDict('fbs').
    runName : str ('opsim')
        Run name applied to every bundle.
    extraSql : str or None (None)
        Additional SQL constraint joined (with ' and ') onto each query.
    extraMetadata : str or None (None)
        Additional metadata string combined into each bundle's metadata.
    nside : int (64)
        Healpix resolution for the sky slicers.
    benchmarkArea : float (18000)
        Benchmark sky area (sq deg) for the fO metrics.
    benchmarkNvisits : int (825)
        Benchmark visit count for the fO metrics.
    DDF : bool (True)
        If True, also build the deep-drilling-field depth/transient bundles.

    Returns
    -------
    dict
        Bundle dictionary built from all generated MetricBundles.
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc, TDEsAsciiMetric
    if colmap is None:
        colmap = ColMapDict('fbs')
    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '
    bundleList = []
    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(
        all=False, extraSql=extraSql, extraMetadata=extraMetadata)
    standardStats = standardSummary(withCount=False)
    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    # Load up the plasticc light curves used by the SN/KN metrics below.
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(
            load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                  extraMetadata=extraMetadata, benchmarkArea=benchmarkArea,
                  benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)
    # Loop through and modify the display dicts - set SRD as group and their
    # previous 'group' as the subgroup.
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace('SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    displayDict = {
        'group': 'SRD',
        'subgroup': 'Year Coverage',
        'order': 0,
        'caption': 'Number of years with observations.'
    }
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [
            metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean,
                                      decreasing=True,
                                      metricName='N Seasons (18k) %s' % f)
        ]
        bundleList.append(
            mb.MetricBundle(metric, slicer, filtersqls[f], plotDict=plotDict,
                            metadata=filtermetadata[f], displayDict=displayDict,
                            summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Cosmology
    #########################
    displayDict = {
        'group': 'Cosmology',
        'subgroup': 'Galaxy Counts',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [
        metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum,
                                  decreasing=True,
                                  metricName='N Galaxies (18k)')
    ]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             metadata=metadata, displayDict=displayDict,
                             summaryMetrics=summary, plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = extraSql
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=extraMetadata, displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict['subgroup'] = 'Camera Rotator'
    displayDict['caption'] = 'Kuiper statistic (0 is uniform, 1 is delta function) of the '
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            # NOTE: this appends to the shared caption on every iteration.
            displayDict['caption'] += f"{m.colname} for visits in {f} band."
            bundleList.append(
                mb.MetricBundle(m, slicer, filtersqls[f], plotDict=plotDict,
                                displayDict=displayDict,
                                summaryMetrics=standardStats,
                                plotFuncs=subsetPlots))

    # XXX--need some sort of metric for weak lensing

    #########################
    # Variables and Transients
    #########################
    displayDict = {
        'group': 'Variables/Transients',
        'subgroup': 'Periodic Stars',
        'order': 0,
        'caption': None
    }
    for period in [
            0.5,
            1,
            2,
    ]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)
            plotDict = {
                'nTicks': 3,
                'colorMin': 0,
                'colorMax': 3,
                'xMin': 0,
                'xMax': 3
            }
            metadata = combineMetadata(
                'P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods,
                                                  starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric, healslicer, sql,
                                     metadata=metadata,
                                     displayDict=displayDict,
                                     plotDict=plotDict,
                                     plotFuncs=subsetPlots,
                                     summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilonova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=extraMetadata, displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'Fraction of TDE lightcurves that could be identified, outside of DD fields'
    detectSNR = {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5}
    # light curve parameters
    epochStart = -22
    peakEpoch = 0
    nearPeakT = 10
    postPeakT = 14  # two weeks
    nPhaseCheck = 1
    # condition parameters
    nObsTotal = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nObsPrePeak = 1
    nObsNearPeak = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nFiltersNearPeak = 3
    nObsPostPeak = 0
    nFiltersPostPeak = 2
    metric = TDEsAsciiMetric(asciifile=None, detectSNR=detectSNR,
                             epochStart=epochStart, peakEpoch=peakEpoch,
                             nearPeakT=nearPeakT, postPeakT=postPeakT,
                             nPhaseCheck=nPhaseCheck, nObsTotal=nObsTotal,
                             nObsPrePeak=nObsPrePeak,
                             nObsNearPeak=nObsNearPeak,
                             nFiltersNearPeak=nFiltersNearPeak,
                             nObsPostPeak=nObsPostPeak,
                             nFiltersPostPeak=nFiltersPostPeak)
    slicer = slicers.HealpixSlicer(nside=32)
    sql = extraSql + joiner + "note not like '%DD%'"
    md = extraMetadata
    if md is None:
        md = " NonDD"
    else:
        md += 'NonDD'
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=standardStats, plotFuncs=plotFuncs,
                             metadata=md, displayDict=displayDict)
    bundleList.append(bundle)

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################
    displayDict = {'group': 'Milky Way', 'subgroup': ''}
    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars')]
    for f in filterlist:
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \
                                 'of less than 0.1 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.1, filtername='r',
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f],
                                 runName=runName, summaryMetrics=sum_stats,
                                 plotFuncs=subsetPlots, plotDict=plotDict,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()
        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(
            Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(
            Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512
        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp],
                                                  dec=dec[goodhp],
                                                  useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(
                        metricName=survey.survey_name + ', ' + f)
                    summary = [
                        metrics.MedianMetric(metricName='Median depth ' +
                                             survey.survey_name + ', ' + f)
                    ]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric, slicer, sql,
                                             metadata=filtermetadata[f],
                                             displayDict=displayDict,
                                             summaryMetrics=summary,
                                             plotFuncs=[], plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(
                    plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                    ra_cen=survey.ra, dec_cen=survey.dec,
                    radius=np.radians(3.), useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs,
                                         metadata=extraMetadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)
    return bundleDict
# Build a grid of pointings covering the Galactic plane, convert to equatorial
# coordinates, and run a PassMetric at every grid point.
out_file = sys.argv[2]
# NOTE(review): out_dir reads the same argv slot as out_file — confirm whether
# one of these was meant to be a different command-line argument.
out_dir = sys.argv[2]
gal_l_min = 0.
gal_l_max = 360.
gal_b_min = -89.
gal_b_max = 89.
diameter = 3.5
step = diameter / np.sqrt(2)  # This would be enough on a 2D plane.
step *= 0.85
# np.linspace requires an integer sample count on modern NumPy; int() truncates
# exactly as the old implicit float->int conversion did.
gal_l_all = np.linspace(gal_l_min, gal_l_max, int((gal_l_max - gal_l_min) / step + 1))
gal_b_all = np.linspace(gal_b_min, gal_b_max, int((gal_b_max - gal_b_min) / step + 1))
(gal_l, gal_b) = np.meshgrid(gal_l_all, gal_b_all)
# Convert the galactic grid to FK5 RA/Dec for the slicer.
c = SkyCoord(gal_l.flatten(), gal_b.flatten(), unit=u.deg, frame='galactic')
userRA = c.fk5.ra.value
userDec = c.fk5.dec.value
columns = ['observationStartMJD', 'filter', 'fiveSigmaDepth']
# PassMetric returns the requested columns unchanged at every grid point.
metric = metrics.PassMetric(cols=columns)
slicer = slicers.UserPointsSlicer(userRA, userDec)
sqlconstraint = ''
MJDmetric = metricBundles.MetricBundle(metric, slicer, sqlconstraint, fileRoot=out_file)
bundleDict = {'MJDmetric': MJDmetric}
# NOTE(review): `database` is not defined in this snippet — it must come from
# earlier in the file (e.g. another argv slot); verify.
opsdb = db.OpsimDatabase(database)
group = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=out_dir)
group.runAll()
# Set the database and query runName = 'minion_1018' opsdb = db.OpsimDatabase(runName + '_sqlite.db') # Set the output directory outDir = 'Observations Dictionary' resultsDb = db.ResultsDb(outDir) # This creates our database of observations. The pass metric just passes data straight through. metric = metrics.PassMetric(cols=['expMJD', 'filter', 'fiveSigmaDepth']) """use slicer to restrict the ra and decs, use np.random.uniform to get random points, first coordinate represents ra and second dec. Or, give a list of specific ra and decs - the second slicer is for the deep drilling fields. One must be commented out.""" #slicer = slicers.UserPointsSlicer(np.random.uniform(0,360,1000), np.random.uniform(-80,0,1000)) slicer = slicers.UserPointsSlicer([349.4, 0.00, 53.0, 34.4, 150.4], [-63.3, -45.5, -27.4, -5.1, 2.8]) #sql is empty as there are no restrictions currently sql = '' bundle = metricBundles.MetricBundle(metric, slicer, sql) bg = metricBundles.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir, resultsDb=resultsDb) bg.runAll() def createdict_for_mjd_filter_depth(bundle): """This function returns a list of tables of exposure day, filter, and five sigma depth for each ra and dec chosen""" number_of_coord = len(bundle.metricValues)
def scienceRadarBatch(colmap=None, runName='', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD
    and the main science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None (None)
        Column-name map; defaults to ColMapDict('opsimV4').
    runName : str ('')
        Run name applied to every bundle.
    extraSql : str or None (None)
        Additional SQL constraint joined (with ' and ') onto each query.
    extraMetadata : str or None (None)
        Additional metadata; forwarded to the intra/inter-night batches.
    nside : int (64)
        Healpix resolution for the sky slicers.
    benchmarkArea : float (18000)
        Benchmark sky area (sq deg) for the fO metrics.
    benchmarkNvisits : int (825)
        Benchmark visit count for the fO metrics.
    DDF : bool (True)
        If True, also build the deep-drilling-field depth/transient bundles.

    Returns
    -------
    dict
        Bundle dictionary of all generated MetricBundles, including the
        intra/inter-night batches.
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '
    bundleList = []

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plasticc light curves used by the SN/KN metrics below.
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(
            load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    sql = extraSql
    displayDict = {
        'group': 'SRD',
        'subgroup': 'fO',
        'order': 0,
        'caption': None
    }
    metric = metrics.CountMetric(col=colmap['mjd'], metricName='fO')
    plotDict = {
        'xlabel': 'Number of Visits',
        'Asky': benchmarkArea,
        'Nvisit': benchmarkNvisits,
        'xMin': 0,
        'xMax': 1500
    }
    summaryMetrics = [
        metrics.fOArea(nside=nside, norm=False, metricName='fOArea',
                       Asky=benchmarkArea, Nvisit=benchmarkNvisits),
        metrics.fOArea(nside=nside, norm=True, metricName='fOArea/benchmark',
                       Asky=benchmarkArea, Nvisit=benchmarkNvisits),
        metrics.fONv(nside=nside, norm=False, metricName='fONv',
                     Asky=benchmarkArea, Nvisit=benchmarkNvisits),
        metrics.fONv(nside=nside, norm=True, metricName='fONv/benchmark',
                     Asky=benchmarkArea, Nvisit=benchmarkNvisits)
    ]
    caption = 'The FO metric evaluates the overall efficiency of observing. '
    caption += (
        'foNv: out of %.2f sq degrees, the area receives at least X and a median of Y visits '
        '(out of %d, if compared to benchmark). ' % (benchmarkArea, benchmarkNvisits))
    caption += ('fOArea: this many sq deg (out of %.2f sq deg if compared '
                'to benchmark) receives at least %d visits. ' %
                (benchmarkArea, benchmarkNvisits))
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(metric, healslicer, sql, plotDict=plotDict,
                             displayDict=displayDict,
                             summaryMetrics=summaryMetrics,
                             plotFuncs=[plots.FOPlot()])
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict = {
        'group': 'SRD',
        'subgroup': 'Gaps',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95.}
    for filtername in 'ugrizy':
        sql = extraSql + joiner + 'filter ="%s"' % filtername
        metric = metrics.MaxGapMetric()
        summaryMetrics = [
            metrics.PercentileMetric(
                percentile=95,
                metricName='95th percentile of Max gap, %s' % filtername)
        ]
        bundle = mb.MetricBundle(metric, healslicer, sql,
                                 plotFuncs=subsetPlots,
                                 summaryMetrics=summaryMetrics,
                                 displayDict=displayDict, plotDict=plotDict)
        bundleList.append(bundle)
        displayDict['order'] += 1

    #########################
    # Solar System
    #########################
    # XXX -- may want to do Solar system seperatly
    # XXX--fraction of NEOs detected (assume some nominal size and albido)
    # XXX -- fraction of MBAs detected
    # XXX -- fraction of KBOs detected
    # XXX--any others? Planet 9s? Comets? Neptune Trojans?

    #########################
    # Cosmology
    #########################
    displayDict = {
        'group': 'Cosmology',
        'subgroup': 'galaxy counts',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95.}
    sql = extraSql + joiner + 'filter="i"'
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [
        metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum,
                                  decreasing=True,
                                  metricName='N Galaxies (WFD)')
    ]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             displayDict=displayDict, summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    metadata = ''
    # XXX-- use the light curves from PLASTICC here
    # FIX: key was 'Caption' (capital C), which no display code reads;
    # every other site uses lowercase 'caption'.
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = ''
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42, badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=metadata, displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # XXX--need some sort of metric for weak lensing and camera rotation.

    #########################
    # Variables and Transients
    #########################
    displayDict = {
        'group': 'Variables and Transients',
        'subgroup': 'Periodic Stars',
        'order': 0,
        'caption': None
    }
    periods = [0.1, 0.5, 1., 2., 5., 10., 20.]  # days
    plotDict = {}
    metadata = ''
    sql = extraSql
    # FIX: same 'Caption' -> 'caption' key correction as above.
    displayDict['caption'] = 'Measure of how well a periodic signal can be measured combining amplitude and phase coverage. 1 is perfect, 0 is no way to fit'
    for period in periods:
        summary = metrics.PercentileMetric(
            percentile=10.,
            metricName='10th %%-ile Periodic Quality, Period=%.1f days' % period)
        metric = metrics.PeriodicQualityMetric(
            period=period, starMag=20.,
            metricName='Periodic Stars, P=%.1f d' % period)
        bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                                 displayDict=displayDict, plotDict=plotDict,
                                 plotFuncs=subsetPlots, summaryMetrics=summary)
        bundleList.append(bundle)
        displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilonova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    sql = ''
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=summary_stats, plotFuncs=plotFuncs,
                             metadata=metadata, displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################
    # Let's do the proper motion, parallax, and DCR degen of a 20th mag star
    rmag = 20.
    displayDict = {
        'group': 'Milky Way',
        'subgroup': 'Astrometry',
        'order': 0,
        'caption': None
    }
    sql = extraSql
    metadata = ''
    plotDict = {'percentileClip': 95.}
    metric = metrics.ParallaxMetric(metricName='Parallax Error r=%.1f' % (rmag),
                                    rmag=rmag,
                                    seeingCol=colmap['seeingGeom'],
                                    filterCol=colmap['filter'],
                                    m5Col=colmap['fiveSigmaDepth'],
                                    normalize=False)
    summary = [
        metrics.AreaSummaryMetric(area=18000, reduce_func=np.median,
                                  decreasing=False,
                                  metricName='Median Parallax Error (WFD)')
    ]
    summary.append(
        metrics.PercentileMetric(percentile=95,
                                 metricName='95th Percentile Parallax Error'))
    bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                             displayDict=displayDict, plotDict=plotDict,
                             plotFuncs=subsetPlots, summaryMetrics=summary)
    bundleList.append(bundle)
    displayDict['order'] += 1

    metric = metrics.ProperMotionMetric(
        metricName='Proper Motion Error r=%.1f' % rmag,
        rmag=rmag, m5Col=colmap['fiveSigmaDepth'], mjdCol=colmap['mjd'],
        filterCol=colmap['filter'], seeingCol=colmap['seeingGeom'],
        normalize=False)
    summary = [
        metrics.AreaSummaryMetric(
            area=18000, reduce_func=np.median, decreasing=False,
            metricName='Median Proper Motion Error (WFD)')
    ]
    # FIX: percentile=95 was missing, so the metric named '95th Percentile'
    # actually used PercentileMetric's default percentile (90); the parallax
    # summary above already passes percentile=95 explicitly.
    summary.append(
        metrics.PercentileMetric(
            percentile=95,
            metricName='95th Percentile Proper Motion Error'))
    bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                             displayDict=displayDict, plotDict=plotDict,
                             summaryMetrics=summary, plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    metric = metrics.ParallaxDcrDegenMetric(
        metricName='Parallax-DCR degeneracy r=%.1f' % (rmag), rmag=rmag,
        seeingCol=colmap['seeingEff'], filterCol=colmap['filter'],
        m5Col=colmap['fiveSigmaDepth'])
    caption = 'Correlation between parallax offset magnitude and hour angle for a r=%.1f star.' % (rmag)
    caption += ' (0 is good, near -1 or 1 is bad).'
    # XXX--not sure what kind of summary to do here
    summary = [metrics.MeanMetric(metricName='Mean DCR Degeneracy')]
    bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                             displayDict=displayDict, summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    for b in bundleList:
        b.setRunName(runName)

    #########################
    # DDF
    #########################
    ddf_time_bundleDicts = []
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys
        ddf_surveys = generate_dd_surveys()
        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512
        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))
        displayDict = {
            'group': 'DDF depths',
            'subgroup': None,
            'order': 0,
            'caption': None
        }
        # Run the inter and intra gaps at the center of the DDFs
        for survey in ddf_surveys:
            slicer = slicers.UserPointsSlicer(ra=np.degrees(survey.ra),
                                              dec=np.degrees(survey.dec),
                                              useCamera=False)
            ddf_time_bundleDicts.append(
                interNight(colmap=colmap, slicer=slicer, runName=runName,
                           nside=64, extraSql='note="%s"' % survey.survey_name,
                           subgroup=survey.survey_name)[0])
            ddf_time_bundleDicts.append(
                intraNight(colmap=colmap, slicer=slicer, runName=runName,
                           nside=64, extraSql='note="%s"' % survey.survey_name,
                           subgroup=survey.survey_name)[0])
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp],
                                                  dec=dec[goodhp],
                                                  useCamera=False)
                for filtername in ['u', 'g', 'r', 'i', 'z', 'y']:
                    metric = metrics.Coaddm5Metric(
                        metricName=survey.survey_name + ', ' + filtername)
                    summary = [
                        metrics.MedianMetric(metricName='median depth ' +
                                             survey.survey_name + ', ' +
                                             filtername)
                    ]
                    sql = extraSql + joiner + 'filter = "%s"' % filtername
                    bundle = mb.MetricBundle(metric, slicer, sql,
                                             metadata=metadata,
                                             displayDict=displayDict,
                                             summaryMetrics=summary,
                                             plotFuncs=[])
                    bundleList.append(bundle)
                    displayDict['order'] += 1

        displayDict = {
            'group': 'DDF Transients',
            'subgroup': None,
            'order': 0,
            'caption': None
        }
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(
                    plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                    ra_cen=survey.ra, dec_cen=survey.dec,
                    radius=np.radians(3.), useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name + ' SNIa')
                sql = ''
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs,
                                         metadata=metadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] += 1

    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)
    # Append the per-night timing batches and the per-DDF timing bundles.
    intraDict = intraNight(colmap=colmap, runName=runName, nside=nside,
                           extraSql=extraSql, extraMetadata=extraMetadata)[0]
    interDict = interNight(colmap=colmap, runName=runName, nside=nside,
                           extraSql=extraSql, extraMetadata=extraMetadata)[0]
    bundleDict.update(intraDict)
    bundleDict.update(interDict)
    for ddf_time in ddf_time_bundleDicts:
        bundleDict.update(ddf_time)
    return bundleDict
def getMetrics(self):
    """Run the OpSim cadence through MAF and compute the structure-function (SF) metric.

    Queries the OpSim database (``self.opsim``) for the visit MJDs at
    (``self.ra``, ``self.dec``) in filter ``self.fil``; then, for each of the
    9 redshift bins, generates ``self.nlc`` artificial damped-random-walk
    light curves, evaluates their SF on both the continuous (1-day cadence)
    sampling and the OpSim sampling, and stores the per-bin mean difference
    of sqrt(SF) in ``self.raz2``.

    Side effects: sets ``self.mjd`` (sorted visit MJDs), ``self.edgesn``
    (SF bin edges of the last generated light curve) and ``self.raz2``
    (array of shape (9, 99)).
    """
    colmn = 'observationStartMJD'
    opsdb = db.OpsimDatabase(self.opsim)
    # Directory where tmp files are going to be stored TODO eliminate - this
    outDir = 'TmpDir'
    resultsDb = db.ResultsDb(outDir=outDir)
    metric = metrics.PassMetric(cols=[colmn, 'fiveSigmaDepth', 'filter'])
    slicer = slicers.UserPointsSlicer(ra=self.ra, dec=self.dec)
    sqlconstraint = 'filter = \'' + self.fil + '\''
    bundle = mb.MetricBundle(metric, slicer, sqlconstraint, runName=self.name)
    bgroup = mb.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir, resultsDb=resultsDb)
    bgroup.runAll()
    mv = bundle.metricValues[0]

    # Get the observation dates, sorted in time.
    self.mjd = np.sort(mv[colmn])

    # Define redshift bins: [0, 0.5, 1.5, ..., 7.5] -> 9 bin centers.
    zbin = np.linspace(0.5, 7.5, 8)
    zbin = np.insert(zbin, 0, 0)
    nz = len(zbin)

    # Survey length in whole days.
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    T = int(self.mjd.max() - self.mjd.min() + 1)

    swop = []  # SF of the OpSim-sampled light curves
    scop = []  # SF of the continuous light curves
    # We generate a number (nlc) of light curves for each redshift bin.
    for z in zbin:
        for w in range(self.nlc):
            # Generating continuous light curve (cadence=1d)
            tt, yy = drw_artificial_lc(T, z=z, frame=self.frame)
            # Calculating SF for the current continuous light curve
            # (assumes self.sf returns 99 SF bins -- see reshape below)
            sn, edgesn = self.sf(tt, yy, z=z)
            scop.append(sn)
            self.edgesn = edgesn
            # Generating OpSim light curve evaluated on the current continuous light curve
            top, yop = self.__opsim_lc(tt, yy)
            # Calculating SF for the current OpSim light curve
            srol, edgesrol = self.sf(top, yop, z=z)
            swop.append(srol)

    swop = np.asarray(swop).reshape(nz, self.nlc, 99)
    scop = np.asarray(scop).reshape(nz, self.nlc, 99)

    # SF metric: difference of sqrt(SF) per bin, with NaNs treated as zero.
    razrol = []
    for z in range(nz):
        for r in range(self.nlc):
            razrol.append(np.nan_to_num(np.sqrt(scop[z, r, :]))
                          - np.nan_to_num(np.sqrt(swop[z, r, :])))
    razrol9 = np.asarray(razrol).reshape(nz, self.nlc, 99)

    # We take the mean of generated light curves for each redshift bin.
    self.raz2 = np.nanmean(razrol9, axis=1)
def generateMicrolensingSlicer(min_crossing_time=1, max_crossing_time=10, t_start=1, t_end=3652,
                               n_events=10000, seed=42, nside=128, filtername='r'):
    """Build a UserPointsSlicer holding a simulated microlensing-event population.

    Sky positions are drawn with probability proportional to the *square* of
    the TRILEGAL stellar density map, so events concentrate toward the plane.
    To be used with MicrolensingMetric.

    Parameters
    ----------
    min_crossing_time : float (1)
        The minimum crossing time for the events generated (days)
    max_crossing_time : float (10)
        The max crossing time for the events generated (days)
    t_start : float (1)
        The night to start generating peaks (days)
    t_end : float (3652)
        The night to end generating peaks (days)
    n_events : int (10000)
        Number of microlensing events to generate
    seed : float (42)
        Random number seed
    nside : int (128)
        HEALpix nside, used to pick which stellar density map to load
    filtername : str ('r')
        The filter to use for the stellar density map
    """
    np.random.seed(seed)
    # NOTE: the order of the random draws below fixes the random stream;
    # do not rearrange them.
    crossing_times = np.random.uniform(low=min_crossing_time, high=max_crossing_time, size=n_events)
    peak_times = np.random.uniform(low=t_start, high=t_end, size=n_events)
    impact_parameters = np.random.uniform(low=0, high=1, size=n_events)

    # Load the stellar density map for the requested filter/resolution.
    density_file = os.path.join(getPackageDir('sims_maps'), 'TriMaps',
                                'TRIstarDensity_%s_nside_%i.npz' % (filtername, nside))
    with np.load(density_file) as data:
        star_density = data['starDensity'].copy()
        # magnitude bins
        mag_bins = data['bins'].copy()

    star_mag = 22
    # First magnitude bin whose upper edge reaches star_mag.
    mag_indx = np.where(mag_bins[1:] >= star_mag)[0].min()
    density = star_density[:, mag_indx].ravel()
    order = np.argsort(density)
    # I think the model might have a few outliers at the extreme, let's truncate it a bit
    density[order[-10:]] = density[order[-11]]

    # Draw N positions from the density-squared distribution via its CDF.
    weights = density[order]**2
    cdf = np.cumsum(weights)
    cdf = cdf / np.max(cdf)
    draws = np.random.uniform(size=n_events)
    picked = np.floor(np.interp(draws, cdf, np.arange(cdf.size)))
    hp_ids = order[picked.astype(int)]

    # HEALpix ids (nested ordering) -> galactic -> equatorial coordinates.
    gal_l, gal_b = hpid2RaDec(nside, hp_ids, nest=True)
    ra, dec = equatorialFromGalactic(gal_l, gal_b)

    # Set up the slicer to evaluate the catalog we just made
    slicer = slicers.UserPointsSlicer(ra, dec, latLonDeg=True, badval=0)
    # Add any additional information about each object to the slicer
    slicer.slicePoints['peak_time'] = peak_times
    slicer.slicePoints['crossing_time'] = crossing_times
    slicer.slicePoints['impact_parameter'] = impact_parameters
    return slicer
Dls = cosmo.angular_diameter_distance_z1z2( myinput['system']['zl'], myinput['system']['zs']).value # in Mpc print(">>>>>>>>>>>>>>>>>> Reading LSST dates and depths <<<<<<<<<<<<<<<<<<<<<") outDir = outdir + '/output' dbFile = path_to_dbfile + runName + '.db' opsimdb = db.opsimDatabase.OpsimDatabase(dbFile) resultsDb = db.ResultsDb(outDir=outDir) #ra=ra*15.0 # SNR limit (Don't use points below this limit) snrLimit = 5.0 # The pass metric just passes data straight through. metric = metrics.PassMetric( cols=['filter', 'fiveSigmaDepth', 'observationStartMJD']) slicer = slicers.UserPointsSlicer(ra, dec, lonCol='fieldRA', latCol='fieldDec') sql = '' bundle = metricBundles.MetricBundle(metric, slicer, sql) bg = metricBundles.MetricBundleGroup({0: bundle}, opsimdb, outDir=outDir, resultsDb=resultsDb) bg.runAll() bundle.metricValues.data[0]['filter'] filters = myinput['filters'] print('%i Observations total at this point (All SNR levels)' % bundle.metricValues.data[0].size) for fname in filters: good = numpy.where(bundle.metricValues.data[0]['filter'] == fname) print('%i Observations in %s' % (good[0].size, fname))
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None (None)
        Column-name map; defaults to ColMapDict('fbs') when None.
    runName : str ('opsim')
        Run name applied to every bundle via setRunName.
    extraSql : str or None (None)
        Extra SQL constraint, AND-ed onto the per-section constraints.
    extraMetadata : str or None (None)
        Extra metadata string combined into each bundle's metadata.
    nside : int (64)
        HEALpix nside used for the main healpix slicers.
    benchmarkArea : float (18000)
        Benchmark area (sq deg) passed through to fOBatch.
    benchmarkNvisits : int (825)
        Benchmark visit count passed through to fOBatch.
    DDF : bool (True)
        If True, also build the deep-drilling-field depth/transient bundles.

    Returns
    -------
    dict of MetricBundle
    """
    # Hide dependencies
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import (Plasticc_metric, plasticc_slicer, load_plasticc_lc,
                            TdePopMetric, generateTdePopSlicer,
                            generateMicrolensingSlicer, MicrolensingMetric)

    if colmap is None:
        colmap = ColMapDict('fbs')

    if extraSql is None:
        extraSql = ''
    # joiner lets extraSql be AND-ed with per-section constraints only when non-empty.
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []
    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(all=False,
                                                                             extraSql=extraSql,
                                                                             extraMetadata=extraMetadata)

    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves - SNIa-normal are loaded in descWFDBatch
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql, extraMetadata=extraMetadata,
                  benchmarkArea=benchmarkArea, benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous
    # 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace('SRD', '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    displayDict = {'group': 'SRD', 'subgroup': 'Year Coverage', 'order': 0,
                   'caption': 'Number of years with observations.'}
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.mean, decreasing=True,
                                             metricName='N Seasons (18k) %s' % f)]
        bundleList.append(mb.MetricBundle(metric, slicer, filtersqls[f],
                                          plotDict=plotDict, metadata=filtermetadata[f],
                                          displayDict=displayDict, summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Galaxies
    #########################

    displayDict = {'group': 'Galaxies', 'subgroup': 'Galaxy Counts', 'order': 0, 'caption': None}
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i', redshiftBin='all', nside=nside)
    summary = [metrics.AreaSummaryMetric(area=18000, reduce_func=np.sum, decreasing=True,
                                         metricName='N Galaxies (18k)')]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    displayDict['caption'] = 'Number of galaxies across the sky, in i band. Generally, full survey footprint.'
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             metadata=metadata, displayDict=displayDict,
                             summaryMetrics=summary, plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    #########################
    # Cosmology
    #########################

    # note the desc batch does not currently take the extraSql or extraMetadata arguments.
    descBundleDict = descWFDBatch(colmap=colmap, runName=runName, nside=nside)
    for d in descBundleDict:
        bundleList.append(descBundleDict[d])

    #########################
    # Variables and Transients
    #########################
    displayDict = {'group': 'Variables/Transients', 'subgroup': 'Periodic Stars',
                   'order': 0, 'caption': None}
    # Grid of (period, magnitude) test cases; three amplitudes evaluated per case.
    for period in [0.5, 1, 2,]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)

            plotDict = {'nTicks': 3, 'colorMin': 0, 'colorMax': 3, 'xMin': 0, 'xMax': 3}
            metadata = combineMetadata('P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                                       extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                                     'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                                     'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                                     'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods, starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric, healslicer, sql, metadata=metadata,
                                     displayDict=displayDict, plotDict=plotDict,
                                     plotFuncs=subsetPlots, summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilovnova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric, slicer, extraSql, runName=runName, summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs, metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict['caption'] = 'TDE lightcurves that could be identified'

    metric = TdePopMetric()
    slicer = generateTdePopSlicer()
    sql = ''
    plotDict = {'reduceFunc': np.sum, 'nside': 128}
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             plotDict=plotDict, plotFuncs=plotFuncs,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Microlensing events
    displayDict['subgroup'] = 'Microlensing'
    displayDict['caption'] = 'Fast microlensing events'

    plotDict = {'nside': 128}
    sql = ''
    slicer = generateMicrolensingSlicer(min_crossing_time=1, max_crossing_time=10)
    metric = MicrolensingMetric(metricName='Fast Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()], metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    displayDict['caption'] = 'Slow microlensing events'
    slicer = generateMicrolensingSlicer(min_crossing_time=100, max_crossing_time=1500)
    metric = MicrolensingMetric(metricName='Slow Microlensing')
    bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                             summaryMetrics=[metrics.MeanMetric(maskVal=0)],
                             plotFuncs=[plots.HealpixSkyMap()], metadata=extraMetadata,
                             displayDict=displayDict, plotDict=plotDict)
    bundleList.append(bundle)

    #########################
    # Milky Way
    #########################

    displayDict = {'group': 'Milky Way', 'subgroup': ''}

    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \
                                 'of less than 0.2 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which
        # to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=False,
                                      seeingCol=colmap['seeingGeom'], m5Col=colmap['fiveSigmaDepth'],
                                      maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots, plotDict=plotDict,
                                 displayDict=displayDict, mapsList=[stellar_map])
        bundleList.append(bundle)

    # Same as above, but ignoring the crowding contribution to the error.
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars, no crowding')]
    for f in filterlist:
        stellar_map = maps.StellarDensityMap(filtername=f)
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error ' \
                                 'of less than 0.2 mag, not considering crowding' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which
        # to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.2, filtername=f, ignore_crowding=True,
                                      seeingCol=colmap['seeingGeom'], m5Col=colmap['fiveSigmaDepth'],
                                      metricName='Nstars_no_crowding',
                                      maps=[])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric, slicer, filtersqls[f], runName=runName,
                                 summaryMetrics=sum_stats, plotFuncs=subsetPlots, plotDict=plotDict,
                                 displayDict=displayDict, mapsList=[stellar_map])
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                # Select high-res healpixels within ddf_radius of the field center.
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp], dec=dec[goodhp], useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(metricName=survey.survey_name + ', ' + f)
                    summary = [metrics.MedianMetric(metricName='Median depth ' +
                                                    survey.survey_name+', ' + f)]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (f)
                    bundle = mb.MetricBundle(metric, slicer, sql, metadata=filtermetadata[f],
                                             displayDict=displayDict, summaryMetrics=summary,
                                             plotFuncs=[], plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                # SNIa detection fraction within 3 degrees of the DDF center.
                slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'], seed=42,
                                         ra_cen=survey.ra, dec_cen=survey.dec,
                                         radius=np.radians(3.), useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name+' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric, slicer, sql, runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs, metadata=extraMetadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)

    return bundleDict
def spot_inspect(filename, ra, dec, year_max=8.5, outDir='temp', season_pad=80):
    """Plot the per-night 5-sigma depth history at one sky position.

    Runs a PassMetric at (ra, dec) against the opsim database `filename`,
    splits the visits into seasons, and plots depth vs. night with one color
    per filter, annotating each season with its visit count, median gap,
    and number of unique nights.

    Parameters
    ----------
    filename : str
        Path to the opsim sqlite database; the '_v1.7_10yrs.db' suffix is
        stripped to form the run name.
    ra, dec : float
        Position to inspect (ra is also passed to season_breaks).
    year_max : float (8.5)
        NOTE(review): only referenced in commented-out code — currently unused.
    outDir : str ('temp')
        Directory for MAF temp/output files.
    season_pad : float (80)
        NOTE(review): only referenced in commented-out code — currently unused.

    Returns
    -------
    (matplotlib.figure.Figure, matplotlib.axes.Axes)
    """
    resultsDb = db.ResultsDb(outDir=outDir)
    # Filter -> plot color mapping.
    f2c = {'u': 'purple', 'g': 'blue', 'r': 'green', 'i': 'cyan',
           'z': 'orange', 'y': 'red'}
    name = filename.replace('_v1.7_10yrs.db', '')
    conn = db.OpsimDatabase(filename)
    bundleList = []

    sql = ''  #'night > 250 and night < %i' % (365*year_max)
    # Pass through the columns needed for the depth/season plot.
    metric = metrics.PassMetric(
        ['filter', 'observationStartMJD', 'fiveSigmaDepth', 'night'])
    slicer = slicers.UserPointsSlicer(ra=ra, dec=dec)
    summaryStats = []
    plotDict = {}
    bundleList.append(
        metricBundles.MetricBundle(metric,
                                   slicer,
                                   sql,
                                   plotDict=plotDict,
                                   summaryMetrics=summaryStats,
                                   runName=name))
    bd = metricBundles.makeBundlesDictFromList(bundleList)
    bg = metricBundles.MetricBundleGroup(bd,
                                         conn,
                                         outDir=outDir,
                                         resultsDb=resultsDb)
    bg.runAll()
    #bg.plotAll(closefigs=False)

    mv = bundleList[0].metricValues[0]
    mv.sort(order='observationStartMJD')

    # Daily MJD grid spanning the observations, used to locate season breaks.
    all_mjd = np.arange(mv['observationStartMJD'].min() - 1,
                        mv['observationStartMJD'].max() + 2, 1)
    breaks_indx = season_breaks(all_mjd, ra)
    # Convert break MJDs to survey-night numbers.
    breaks1 = all_mjd[breaks_indx] - all_mjd.min() + mv['night'].min()
    breaks = [mv['night'].min() - 1]
    breaks.extend(breaks1.tolist())
    breaks.append(mv['night'].max() + 3)
    #breaks = np.array([mv['night'].min()-season_pad] + breaks.tolist() + [mv['night'].max()+season_pad])

    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)

    # Season midpoints (list + ndarray broadcasts to an ndarray).
    di = np.diff(breaks)
    mps = breaks[0:-1] + di / 2

    # Per-season visit counts, median gaps and unique nights.
    counts, med_gaps, unights = gap_stats(mv['night'], bins=breaks)

    for fn in f2c:
        in_filt = np.where(mv['filter'] == fn)[0]
        ax1.plot(mv['night'][in_filt],
                 mv['fiveSigmaDepth'][in_filt],
                 'o',
                 color=f2c[fn],
                 label=fn,
                 alpha=0.5)
    ax1.set_xlabel('Night')
    ax1.set_ylabel(r'5$\sigma$ depth (mags)')

    # Annotate each season with count / median gap / unique nights.
    for i in np.arange(mps.size):
        plt.annotate('%i\n %.1f \n %i' % (counts[i], med_gaps[i], unights[i]),
                     [mps[i], 20])
    #plt.legend(loc=(1.04,0))
    # Vertical lines at the season boundaries.
    for br in breaks:
        ax1.axvline(br)
    ax1.set_ylim([19.5, 25.5])
    #plt.xlim([1340, 1560])
    ax1.set_title(name + '\nra=%.2f, dec=%.2f' % (ra, dec))
    return fig, ax1
# OneDSlicer slicer = slicers.OneDSlicer(sliceColName='night', binsize=10) metric = metrics.CountMetric(col='expMJD') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # OpsimFieldSlicer slicer = slicers.OpsimFieldSlicer() metric = metrics.MeanMetric(col='airmass') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # UserPointsSlicer ra = np.arange(0, 101, 1) / 100. * np.pi dec = np.arange(0, 101, 1) / 100. * (-np.pi) slicer = slicers.UserPointsSlicer(ra=ra, dec=dec) metric = metrics.MeanMetric(col='airmass', metricName='meanAirmass_user') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # UserPointsSlicer, turn on the camera focal plane geometry slicer = slicers.UserPointsSlicer(ra=ra, dec=dec, useCamera=True) metric = metrics.MeanMetric(col='airmass', metricName='meanAirmass_user_w_camera') bundle = metricBundles.MetricBundle(metric, slicer, sqlWhere) bundleList.append(bundle) # healpixComplexSlicer (healpix slicer + summaryHistogram) bins = np.arange(0.5, 3.0, 0.1) slicer = slicers.HealpixSlicer(nside=16) metric = metrics.TgapsMetric(bins=bins)