def tdcBatch(colmap=None, runName='opsim', nside=64, accuracyThreshold=0.04, extraSql=None, extraMetadata=None):
    """Build the strong-lensing Time Delay Challenge (TDC) metric bundle.

    Parameters mirror the other batch functions: an optional column map
    (defaults to the 'fbs' mapping), the run name, the healpix resolution,
    an accuracy threshold (currently unused here), and optional extra
    sql constraint / metadata strings.

    Returns the bundle dictionary produced by mb.makeBundlesDictFromList.
    """
    # The options to add additional sql constraints are removed for now.
    if colmap is None:
        colmap = ColMapDict('fbs')
    # Calculate a subset of DESC WFD-related metrics.
    display = {'group': 'Strong Lensing', 'subgroup': 'Lens Time Delay'}
    skyPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]
    # Ideally need a way to do better on calculating the summary metrics for the high accuracy area.
    summaries = [metrics.MeanMetric(), metrics.MedianMetric(), metrics.RmsMetric()]
    tdc = metrics.TdcMetric(metricName='TDC',
                            nightCol=colmap['night'],
                            expTimeCol=colmap['exptime'],
                            mjdCol=colmap['mjd'])
    bundle = mb.MetricBundle(tdc,
                             slicers.HealpixSlicer(nside=nside),
                             constraint=extraSql,
                             metadata=extraMetadata,
                             displayDict=display,
                             plotFuncs=skyPlots,
                             summaryMetrics=summaries)
    # Single-bundle batch: stamp the run name and wrap in a bundle dict.
    bundle.setRunName(runName)
    return mb.makeBundlesDictFromList([bundle])
def testStarMap(self):
    """Check StellarDensityMap output keys for healpix and opsim-field slicers.

    Bug fix: the original asserted ``np.max(result['starLumFunc'] > 0)`` —
    the max of a *boolean* array — instead of asserting that the maximum
    luminosity-function value is positive. The parenthesis is moved so the
    comparison applies to the max itself.
    """
    mapPath = os.environ['SIMS_MAPS_DIR']
    # Map data is an optional download; skip (with a warning) when absent.
    if os.path.isfile(os.path.join(mapPath, 'StarMaps/starDensity_r_nside_64.npz')):
        data = makeDataValues()
        # check that it works if nside does not match map nside of 64
        nsides = [32, 64, 128]
        for nside in nsides:
            starmap = maps.StellarDensityMap()
            slicer1 = slicers.HealpixSlicer(nside=nside)
            slicer1.setupSlicer(data)
            result1 = starmap.run(slicer1.slicePoints)
            assert ('starMapBins' in list(result1.keys()))
            assert ('starLumFunc' in list(result1.keys()))
            # Fixed: assert the maximum value is positive.
            assert (np.max(result1['starLumFunc']) > 0)
            # Opsim field slicer requires both simulated and field data.
            fieldData = makeFieldData()
            slicer2 = slicers.OpsimFieldSlicer()
            slicer2.setupSlicer(data, fieldData)
            result2 = starmap.run(slicer2.slicePoints)
            assert ('starMapBins' in list(result2.keys()))
            assert ('starLumFunc' in list(result2.keys()))
            assert (np.max(result2['starLumFunc']) > 0)
    else:
        warnings.warn('Did not find stellar density map, skipping test.')
def testDustMap(self):
    """DustMap should add an 'ebv' entry to slicePoints for both slicer types,
    support interpolation, and warn when its nside disagrees with the slicer's."""
    mapPath = os.environ['SIMS_MAPS_DIR']
    # The dust map data file is an optional download; warn and skip if absent.
    if os.path.isfile(os.path.join(mapPath, 'DustMaps/dust_nside_128.npz')):
        data = makeDataValues()
        dustmap = maps.DustMap()
        # Healpix slicer path.
        slicer1 = slicers.HealpixSlicer()
        slicer1.setupSlicer(data)
        result1 = dustmap.run(slicer1.slicePoints)
        assert ('ebv' in list(result1.keys()))
        # Opsim field slicer path (needs both simulated and field data).
        fieldData = makeFieldData()
        slicer2 = slicers.OpsimFieldSlicer()
        slicer2.setupSlicer(data, fieldData)
        result2 = dustmap.run(slicer2.slicePoints)
        assert ('ebv' in list(result2.keys()))
        # Check interpolation works
        dustmap = maps.DustMap(interp=True)
        result3 = dustmap.run(slicer2.slicePoints)
        assert ('ebv' in list(result3.keys()))
        # Check warning gets raised
        dustmap = maps.DustMap(nside=4)
        with warnings.catch_warnings(record=True) as w:
            # "always" so the warning is recorded even if previously triggered.
            warnings.simplefilter("always")
            dustmap.run(slicer1.slicePoints)
            self.assertTrue("nside" in str(w[-1].message))
    else:
        warnings.warn('Did not find dustmaps, not running testMaps.py')
def testHistogramM5Metric(self):
    """HistogramM5Metric binned output should match Coaddm5Metric computed per field."""
    metric = metrics.HistogramM5Metric(bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    # Clobber the stacker that gets auto-added
    mb.stackerList = []
    # No database connection: feed simData directly through runCurrent.
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Healpix cells where at least one of the two bins is unmasked.
    good = np.where((mb.metricValues.mask[:, 0] == False) | (mb.metricValues.mask[:, 1] == False))[0]
    checkMetric = metrics.Coaddm5Metric()
    # Minimal structured array with just the column Coaddm5Metric reads.
    tempSlice = np.zeros(self.n1, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_1
    val1 = checkMetric.run(tempSlice)
    tempSlice = np.zeros(self.n2, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_2
    val2 = checkMetric.run(tempSlice)
    # -666. is presumably the badval for bins with no visits -- TODO confirm.
    expected = np.array([[val1, -666.], [-666., val2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
def testOut(self):
    """
    Check that the metric bundle can generate the expected output
    """
    bundle = metricBundles.MetricBundle(metrics.MeanMetric(col='airmass'),
                                        slicers.HealpixSlicer(nside=8),
                                        'filter="r"')
    testDir = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')
    dbPath = os.path.join(testDir, 'opsimblitz1_1133_sqlite.db')
    connection = db.OpsimDatabase(database=dbPath)
    results = db.ResultsDb(outDir=self.outDir)
    group = metricBundles.MetricBundleGroup({0: bundle}, connection,
                                            outDir=self.outDir, resultsDb=results)
    group.runAll()
    group.plotAll()
    group.writeAll()
    thumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
    npzFiles = glob.glob(os.path.join(self.outDir, '*.npz'))
    pdfs = glob.glob(os.path.join(self.outDir, '*.pdf'))
    # By default, make 3 plots for healpix
    assert (len(thumbs) == 3)
    assert (len(pdfs) == 3)
    assert (len(npzFiles) == 1)
def compute_metric(params):
    """Function to execute the metric calculation when code is called from the commandline"""
    # NOTE(review): database and output paths are hard-coded; confirm they exist
    # on the target host before running.
    obsdb = db.OpsimDatabase('../../tutorials/baseline2018a.db')
    outputDir = '/home/docmaf/'
    resultsDb = db.ResultsDb(outDir=outputDir)
    # Proposal info is needed to build a SQL constraint for the requested survey.
    (propids, proptags) = obsdb.fetchPropInfo()
    surveyWhere = obsdb.createSQLWhere(params['survey'], proptags)
    # Assemble the keyword arguments the metric expects from the commandline params.
    obs_params = {
        'filters': params['filters'],
        'cadence': params['cadence'],
        'start_date': params['start_date'],
        'end_date': params['end_date']
    }
    metric = CadenceOverVisibilityWindowMetric(**obs_params)
    slicer = slicers.HealpixSlicer(nside=64)
    sqlconstraint = surveyWhere
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint)
    # NOTE(review): plots/data go to 'newmetric_test' while resultsDb lives in
    # outputDir -- confirm that split is intended.
    bgroup = metricBundles.MetricBundleGroup({0: bundle}, obsdb, outDir='newmetric_test', resultsDb=resultsDb)
    bgroup.runAll()
def testHistogramMetric(self):
    """HistogramMetric: default statistic counts per bin; also check statistic='sum'."""
    metric = metrics.HistogramMetric(bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    # No database: feed simData directly through runCurrent.
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Healpix cells with an unmasked final bin.
    good = np.where(mb.metricValues.mask[:, -1] == False)[0]
    expected = np.array([[self.n1, 0.], [0., self.n2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
    # Check that I can run a different statistic
    metric = metrics.HistogramMetric(col='fiveSigmaDepth', statistic='sum', bins=[0.5, 1.5, 2.5])
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # NOTE(review): 'good' is reused from the first run rather than recomputed
    # from the new metricValues -- confirm that is intended.
    expected = np.array([[self.m5_1 * self.n1, 0.], [0., self.m5_2 * self.n2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
def load_and_run():
    """Run the SNNSN metric over the COSMOS DD field of the v1.7 baseline run."""
    dbFile = 'baseline_nexp2_v1.7_10yrs.db'
    opsimdb = db.OpsimDatabase(dbFile)
    runName = dbFile.replace('.db', '')
    #sql = ''
    sql = '(note = "%s")' % ('DD:COSMOS')
    snMetric = SNNSNMetric(verbose=False)  #, zlim_coeff=0.98)
    healSlicer = slicers.HealpixSlicer(nside=64)
    bundle = metricBundles.MetricBundle(snMetric, healSlicer, sql, runName=runName)
    outDir = 'temp'
    resultsDb = db.ResultsDb(outDir=outDir)
    group = metricBundles.MetricBundleGroup(
        metricBundles.makeBundlesDictFromList([bundle]),
        opsimdb, outDir=outDir, resultsDb=resultsDb)
    group.runAll()
    group.plotAll()
def runChips(useCamera=False):
    """Count visits per healpix with/without the camera footprint and save a gnomonic plot.

    Bug fix: the original used the Python 2 ``print`` statement, which is a
    SyntaxError under Python 3; it is replaced with the print() function
    (same output). The ``if useCamera: tag='T'`` branch is collapsed to a
    conditional expression with identical behavior.
    """
    import numpy as np
    import lsst.sims.maf.slicers as slicers
    import lsst.sims.maf.metrics as metrics
    import lsst.sims.maf.metricBundles as metricBundles
    import lsst.sims.maf.db as db
    from lsst.sims.maf.plots import PlotHandler
    import matplotlib.pylab as plt
    import healpy as hp
    print('Camera setting = ', useCamera)
    database = 'enigma_1189_sqlite.db'
    # Restrict to an early-survey r-band patch near the equator.
    sqlWhere = 'filter = "r" and night < 800 and fieldRA < %f and fieldDec > %f and fieldDec < 0' % (np.radians(15), np.radians(-15))
    opsdb = db.OpsimDatabase(database)
    outDir = 'Camera'
    resultsDb = db.ResultsDb(outDir=outDir)
    nside = 512
    # Tag output files by whether the camera footprint was used.
    tag = 'T' if useCamera else 'F'
    metric = metrics.CountMetric('expMJD', metricName='chipgap_%s' % tag)
    slicer = slicers.HealpixSlicer(nside=nside, useCamera=useCamera)
    bundle1 = metricBundles.MetricBundle(metric, slicer, sqlWhere)
    bg = metricBundles.MetricBundleGroup({0: bundle1}, opsdb, outDir=outDir, resultsDb=resultsDb)
    bg.runAll()
    # Gnomonic (small-area) view centered on the patch; save to disk.
    hp.gnomview(bundle1.metricValues, xsize=800, ysize=800, rot=(7, -7, 0), unit='Count', min=1)
    plt.savefig(outDir + '/fig' + tag + '.png')
def testRunRegularToo(self):
    """
    Test that a binned slicer and a regular slicer can run together
    """
    sql = ''
    accum = metricBundle.MetricBundle(metrics.AccumulateM5Metric(bins=[0.5, 1.5, 2.5]),
                                      slicers.HealpixSlicer(nside=16), sql)
    coadd = metricBundle.MetricBundle(metrics.Coaddm5Metric(),
                                      slicers.HealpixSlicer(nside=16), sql)
    bundles = [accum, coadd]
    group = metricBundle.MetricBundleGroup(metricBundle.makeBundlesDictFromList(bundles),
                                           None, saveEarly=False)
    group.setCurrent('')
    group.runCurrent('', simData=self.simData)
    # The final accumulation bin should match the plain coadded depth.
    assert (np.array_equal(bundles[0].metricValues[:, 1].compressed(),
                           bundles[1].metricValues.compressed()))
def fOBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64, benchmarkArea=18000, benchmarkNvisits=825):
    """Build the SRD fO metric bundle (visit counts vs. area/Nvisits benchmarks).

    Returns a bundle dict from mb.makeBundlesDictFromList containing a single
    CountMetric bundle with fOArea/fONv summary metrics.
    """
    # Allow user to add dithering.
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    bundleList = []
    sql = ''
    metadata = 'All visits'
    # Add additional sql constraint (such as wfdWhere) and metadata, if provided.
    if (extraSql is not None) and (len(extraSql) > 0):
        sql = extraSql
        if extraMetadata is None:
            # Derive a readable metadata label from the sql by stripping
            # 'filter=' clauses and quote characters.
            metadata = extraSql.replace('filter =', '').replace('filter=', '')
            metadata = metadata.replace('"', '').replace("'", '')
    if extraMetadata is not None:
        metadata = extraMetadata
    subgroup = metadata
    raCol = colmap['ra']
    decCol = colmap['dec']
    degrees = colmap['raDecDeg']
    # Set up fO metric.
    slicer = slicers.HealpixSlicer(nside=nside, lonCol=raCol, latCol=decCol, latLonDeg=degrees)
    displayDict = {'group': 'FO metrics', 'subgroup': subgroup, 'order': 0}
    # Configure the count metric which is what is used for f0 slicer.
    metric = metrics.CountMetric(col=colmap['mjd'], metricName='fO')
    plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkArea,
                'Nvisit': benchmarkNvisits, 'xMin': 0, 'xMax': 1500}
    # Four summary metrics: area/Nvisit statistics, both raw and normalized
    # against the benchmark values.
    summaryMetrics = [metrics.fOArea(nside=nside, norm=False, metricName='fOArea: Nvisits (#)',
                                     Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fOArea(nside=nside, norm=True, metricName='fOArea: Nvisits/benchmark',
                                     Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fONv(nside=nside, norm=False, metricName='fONv: Area (sqdeg)',
                                   Asky=benchmarkArea, Nvisit=benchmarkNvisits),
                      metrics.fONv(nside=nside, norm=True, metricName='fONv: Area/benchmark',
                                   Asky=benchmarkArea, Nvisit=benchmarkNvisits)]
    caption = 'The FO metric evaluates the overall efficiency of observing. '
    caption += ('fOArea: Nvisits = %.1f sq degrees receive at least this many visits out of %d. '
                % (benchmarkArea, benchmarkNvisits))
    caption += ('fONv: Area = this many square degrees out of %.1f receive at least %d visits.'
                % (benchmarkArea, benchmarkNvisits))
    displayDict['caption'] = caption
    bundle = mb.MetricBundle(metric, slicer, sql, plotDict=plotDict,
                             displayDict=displayDict, summaryMetrics=summaryMetrics,
                             plotFuncs=[plots.FOPlot()], metadata=metadata)
    bundleList.append(bundle)
    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    return mb.makeBundlesDictFromList(bundleList)
def testHealpix2dSlicer(self):
    """Run AccumulateCountMetric through a healpix slicer and check the 2-d output."""
    sql = ''
    bundle = metricBundle.MetricBundle(metrics.AccumulateCountMetric(bins=[0.5, 1.5, 2.5]),
                                       slicers.HealpixSlicer(nside=16), sql)
    group = metricBundle.MetricBundleGroup({0: bundle}, None, saveEarly=False)
    group.setCurrent('')
    group.runCurrent('', simData=self.simData)
    # Cells whose final bin is unmasked.
    unmasked = np.where(bundle.metricValues.mask[:, -1] == False)[0]
    expected = np.array([[self.n1, self.n1], [-666., self.n2]])
    assert (np.array_equal(bundle.metricValues.data[unmasked, :], expected))
def basicSetup(metricName, colmap=None, nside=64):
    """Return (colmap, alt/az healpix slicer, visit-count metric) for the given name."""
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    # Alt/az slicer: caching disabled because alt/az varies visit to visit.
    altAzSlicer = slicers.HealpixSlicer(nside=nside,
                                        latCol=colmap['alt'],
                                        lonCol=colmap['az'],
                                        latLonDeg=colmap['raDecDeg'],
                                        useCache=False)
    countMetric = metrics.CountMetric(colmap['mjd'], metricName=metricName)
    return colmap, altAzSlicer, countMetric
def testAccumulateMetric(self):
    """AccumulateMetric should cumulatively sum fiveSigmaDepth across the bins."""
    metric = metrics.AccumulateMetric(col='fiveSigmaDepth', bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    # Clobber the stacker that gets auto-added
    mb.stackerList = []
    # No database: feed simData directly through runCurrent.
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Keep only cells whose final bin is unmasked.
    good = np.where(mb.metricValues.mask[:, -1] == False)[0]
    # -666. is presumably the badval for a bin with no data yet -- TODO confirm.
    expected = np.array([[self.n1*self.m5_1, self.n1*self.m5_1], [-666., self.n2 * self.m5_2]])
    assert(np.array_equal(mb.metricValues.data[good, :], expected))
def test_healpixSlicer_floats(self):
    """Round-trip float metric values through writeData/readData and compare slicers."""
    nside = 32
    slicer = slicers.HealpixSlicer(nside=nside)
    values = np.random.rand(hp.nside2npix(nside))
    metricName = 'Noise'
    filename = 'healpix_test.npz'
    # Register the file so the test case can clean it up afterwards.
    self.filenames.append(filename)
    slicer.writeData(filename, values, metadata='testdata')
    valuesBack, slicerBack, header = self.baseslicer.readData(filename)
    np.testing.assert_almost_equal(valuesBack, values)
    assert(slicer == slicerBack)
    # Spot-check the individual attributes behind slicer equality as well.
    for attribute in ['nside', 'nslice', 'columnsNeeded', 'lonCol', 'latCol']:
        assert(getattr(slicer, attribute) == getattr(slicerBack, attribute))
def test_complex(self):
    # Test case where there is a complex metric
    nside = 8
    slicer = slicers.HealpixSlicer(nside=nside)
    # Object-dtype values: each slice point holds a variable-length array.
    data = np.zeros(slicer.nslice, dtype='object')
    for i, ack in enumerate(data):
        n_el = np.random.rand(1) * 4  # up to 4 elements
        # NOTE(review): n_el is a 1-element float array; np.arange accepts it
        # (truncating toward zero) but this is deprecated -- confirm intended.
        data[i] = np.arange(n_el)
    with lsst.utils.tests.getTempFilePath('.npz') as filename:
        slicer.writeData(filename, data)
        dataBack, slicerBack, header = self.baseslicer.readData(filename)
        assert (slicer == slicerBack)
        # This is a crazy slow loop!
        for i, ack in enumerate(data):
            np.testing.assert_almost_equal(dataBack[i], data[i])
def test_healpixSlicer_floats(self):
    """Round-trip float metric values through writeData/readData via a temp file."""
    nside = 32
    slicer = slicers.HealpixSlicer(nside=nside)
    metricValues = np.random.rand(hp.nside2npix(nside))
    # Temp file is removed automatically when the context exits.
    with lsst.utils.tests.getTempFilePath('.npz') as filename:
        slicer.writeData(filename, metricValues, metadata='testdata')
        metricValuesBack, slicerBack, header = self.baseslicer.readData(
            filename)
        # Values and slicer must survive the round trip unchanged.
        np.testing.assert_almost_equal(metricValuesBack, metricValues)
        assert (slicer == slicerBack)
        attr2check = [
            'nside', 'nslice', 'columnsNeeded', 'lonCol', 'latCol'
        ]
        for att in attr2check:
            assert (getattr(slicer, att) == getattr(slicerBack, att))
def setupHealpixSlicer(args, verbose=False):
    """
    Instantiates and sets up the healpix slicer, using the subset of simdata
    (simdatasubset) which should be used for this slice of the movieslicer.
    raCol and decCol identify the columns to be used by the healpix slicer.
    nside sets the resolution of the healpix slicer.
    Returns the healpix slicer.
    """
    start = time.time()
    healSlicer = slicers.HealpixSlicer(nside=args.nside,
                                       lonCol=args.raCol,
                                       latCol=args.decCol)
    # dtime returns (elapsed, new reference time).
    elapsed, start = dtime(start)
    if verbose:
        print('Set up healpix slicer %f s' % (elapsed))
    return healSlicer
def test_healpixSlicer_obj(self):
    """Round-trip object-dtype, masked metric values through writeData/readData."""
    nside = 32
    slicer = slicers.HealpixSlicer(nside=nside)
    metricValues = np.random.rand(hp.nside2npix(nside)).astype('object')
    # Mask values below 0.1; fill with the slicer's badval.
    metricValues = ma.MaskedArray(data=metricValues,
                                  mask=np.where(metricValues < .1, True, False),
                                  fill_value=slicer.badval)
    metricName = 'Noise'  # NOTE(review): unused local
    filename = 'healpix_test.npz'
    # Register the file so it can be cleaned up after the test.
    self.filenames.append(filename)
    metadata = 'testdata'
    slicer.writeData(filename, metricValues, metadata=metadata)
    metricValuesBack, slicerBack, header = self.baseslicer.readData(filename)
    np.testing.assert_almost_equal(metricValuesBack, metricValues)
    assert(slicer == slicerBack)
    # Metadata string must also survive the round trip.
    assert(metadata == header['metadata'])
    attr2check = ['nside', 'nslice', 'columnsNeeded', 'lonCol', 'latCol']
    for att in attr2check:
        assert(getattr(slicer, att) == getattr(slicerBack, att))
def testOut(self):
    """
    Check that the metric bundle can generate the expected output
    """
    nside = 8
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.MeanMetric(col='airmass')
    sql = 'filter="r"'
    # Explicit stackers and maps attached to the bundle.
    stacker1 = stackers.RandomDitherFieldPerVisitStacker()
    stacker2 = stackers.GalacticStacker()
    map1 = maps.GalCoordsMap()
    map2 = maps.StellarDensityMap()
    metricB = metricBundles.MetricBundle(metric, slicer, sql,
                                         stackerList=[stacker1, stacker2],
                                         mapsList=[map1, map2])
    database = os.path.join(getPackageDir('sims_data'), 'OpSimData', 'astro-lsst-01_2014.db')
    opsdb = db.OpsimDatabaseV4(database=database)
    resultsDb = db.ResultsDb(outDir=self.outDir)
    bgroup = metricBundles.MetricBundleGroup({0: metricB}, opsdb,
                                             outDir=self.outDir, resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    bgroup.writeAll()
    opsdb.close()
    # Collect everything written to the output directory.
    outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
    outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
    outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))
    # By default, make 3 plots for healpix
    assert (len(outThumbs) == 3)
    assert (len(outPdf) == 3)
    assert (len(outNpz) == 1)
def testAccumulateM5Metric(self):
    """AccumulateM5Metric should match Coaddm5Metric at each accumulation bin.

    Bug fix: ``np.zeros(..., dtype=zip(...))`` fails under Python 3 because
    zip returns an iterator, not a list; materialize it with list() — this
    also matches the sibling testHistogramM5Metric in this file.
    """
    metric = metrics.AccumulateM5Metric(bins=[0.5, 1.5, 2.5])
    slicer = slicers.HealpixSlicer(nside=16)
    sql = ''
    mb = metricBundle.MetricBundle(metric, slicer, sql)
    # No database: feed simData directly through runCurrent.
    mbg = metricBundle.MetricBundleGroup({0: mb}, None, saveEarly=False)
    mbg.setCurrent('')
    mbg.runCurrent('', simData=self.simData)
    # Cells with an unmasked final bin.
    good = np.where(mb.metricValues.mask[:, -1] == False)[0]
    checkMetric = metrics.Coaddm5Metric()
    # Minimal structured array with only the column Coaddm5Metric reads.
    tempSlice = np.zeros(self.n1, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_1
    val1 = checkMetric.run(tempSlice)
    tempSlice = np.zeros(self.n2, dtype=list(zip(['fiveSigmaDepth'], [float])))
    tempSlice['fiveSigmaDepth'] += self.m5_2
    val2 = checkMetric.run(tempSlice)
    expected = np.array([[val1, val1], [-666., val2]])
    assert (np.array_equal(mb.metricValues.data[good, :], expected))
def test_healpixSlicer_masked(self):
    """Round-trip a masked float healpix array through writeData/readData."""
    # Fixed seed keeps the mask pattern reproducible.
    rng = np.random.RandomState(712551)
    nside = 32
    slicer = slicers.HealpixSlicer(nside=nside)
    metricValues = rng.rand(hp.nside2npix(nside))
    # Mask values below 0.1; fill with the slicer's badval.
    metricValues = ma.MaskedArray(data=metricValues,
                                  mask=np.where(metricValues < .1, True, False),
                                  fill_value=slicer.badval)
    with lsst.utils.tests.getTempFilePath('.npz') as filename:
        slicer.writeData(filename, metricValues, metadata='testdata')
        metricValuesBack, slicerBack, header = self.baseslicer.readData(
            filename)
        # Values (including mask handling) and slicer must survive the round trip.
        np.testing.assert_almost_equal(metricValuesBack, metricValues)
        assert (slicer == slicerBack)
        attr2check = [
            'nside', 'nslice', 'columnsNeeded', 'lonCol', 'latCol'
        ]
        for att in attr2check:
            assert (getattr(slicer, att) == getattr(slicerBack, att))
def testOut(self):
    """
    Check that the metric bundle can generate the expected output
    """
    nside = 8
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.MeanMetric(col='airmass')
    sql = 'filter="r"'
    stacker1 = stackers.RandomDitherFieldPerVisitStacker()
    stacker2 = stackers.GalacticStacker()
    # NOTE(review): map1/map2 are constructed but never attached to the bundle
    # (no mapsList argument below) -- confirm whether that is intended.
    map1 = maps.GalCoordsMap()
    map2 = maps.StellarDensityMap()
    metricB = metricBundles.MetricBundle(metric, slicer, sql,
                                         stackerList=[stacker1, stacker2])
    filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')
    database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
    opsdb = db.OpsimDatabase(database=database)
    resultsDb = db.ResultsDb(outDir=self.outDir)
    bgroup = metricBundles.MetricBundleGroup({0: metricB}, opsdb,
                                             outDir=self.outDir, resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
    bgroup.writeAll()
    # Collect everything written to the output directory.
    outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
    outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
    outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))
    # By default, make 3 plots for healpix
    assert (len(outThumbs) == 3)
    assert (len(outPdf) == 3)
    assert (len(outNpz) == 1)
def makeBundleList(dbFile, night=1, nside=64, latCol='ditheredDec', lonCol='ditheredRA'):
    """
    Make a bundleList of things to run for a single night's report.

    Bug fix: the NightPointingMetric constraint was assigned with a doubled
    chained assignment (``sql = sql = 'night=%i' % night``) — harmless but
    clearly a typo; collapsed to a single assignment.

    Parameters
    ----------
    dbFile : str
        Opsim database file name (not used inside this function).
    night : int
        Night number used in every SQL constraint.
    nside : int
        Healpix resolution for the sky slicers.
    latCol, lonCol : str
        Column names for the (dithered) pointing coordinates.

    Returns
    -------
    dict
        Bundle dictionary from metricBundles.makeBundlesDictFromList.
    """
    # Construct sql queries for each filter and all filters
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    sqls = ['night=%i and filter="%s"' % (night, f) for f in filters]
    sqls.append('night=%i' % night)
    bundleList = []
    plotFuncs_lam = [plots.LambertSkyMap()]
    # RA/Dec slicer plus an alt/az slicer (no cache: alt/az varies per visit).
    reg_slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol,
                                       latLonDeg=False)
    altaz_slicer = slicers.HealpixSlicer(nside=nside, latCol='altitude',
                                         latLonDeg=False, lonCol='azimuth',
                                         useCache=False)
    unislicer = slicers.UniSlicer()
    for sql in sqls:
        # Number of exposures
        metric = metrics.CountMetric('expMJD', metricName='N visits')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)
        metric = metrics.CountMetric('expMJD', metricName='N visits alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql, plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)
        metric = metrics.MeanMetric('expMJD', metricName='Mean Visit Time')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)
        metric = metrics.MeanMetric('expMJD', metricName='Mean Visit Time alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql, plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)
        metric = metrics.CountMetric('expMJD', metricName='N_visits')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql)
        bundleList.append(bundle)
    # Need pairs in window to get a map of how well it gathered SS pairs.
    # Moon phase.
    # Whole-night summary statistics.
    metric = metrics.NChangesMetric(col='filter', metricName='Filter Changes')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.OpenShutterFractionMetric()
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MeanMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MinMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MaxMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    # Make plots of the solar system pairs that were taken in the night
    metric = metrics.PairMetric()
    sql = 'night=%i and (filter ="r" or filter="g" or filter="i")' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)
    metric = metrics.PairMetric(metricName='z Pairs')
    sql = 'night=%i and filter="z"' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)
    # Plot up each visit
    metric = metrics.NightPointingMetric()
    slicer = slicers.UniSlicer()
    sql = 'night=%i' % night
    plotFuncs = [plots.NightPointingPlotter()]
    bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=plotFuncs)
    bundleList.append(bundle)
    return metricBundles.makeBundlesDictFromList(bundleList)
def setUp(self):
    # Set up a slicer and some metric data for that slicer.
    # nside=4 keeps the healpix grid tiny so the tests stay fast.
    self.testslicer = slicers.HealpixSlicer(nside=4, verbose=False)
def makeBundleList(dbFile, night=1, nside=64, latCol='fieldDec', lonCol='fieldRA', notes=True, colmap=None):
    """
    Make a bundleList of things to run for a single night's report
    (hourglass, per-filter sky maps, slew stats, pair maps, and optional
    'note' column statistics).
    """
    if colmap is None:
        colmap = ColMapDict('opsimV4')
    mjdCol = 'observationStartMJD'
    altCol = 'altitude'
    azCol = 'azimuth'
    # Construct sql queries for each filter and all filters
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    sqls = ['night=%i and filter="%s"' % (night, f) for f in filters]
    sqls.append('night=%i' % night)
    bundleList = []
    plotFuncs_lam = [plots.LambertSkyMap()]
    # Hourglass
    hourslicer = slicers.HourglassSlicer()
    displayDict = {'group': 'Hourglass'}
    md = ''
    sql = 'night=%i' % night
    metric = metrics.HourglassMetric(nightCol=colmap['night'], mjdCol=colmap['mjd'],
                                     metricName='Hourglass')
    bundle = metricBundles.MetricBundle(metric, hourslicer, constraint=sql,
                                        metadata=md, displayDict=displayDict)
    bundleList.append(bundle)
    # RA/Dec slicer plus an alt/az slicer (no cache: alt/az varies per visit).
    reg_slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol,
                                       latLonDeg=True)
    altaz_slicer = slicers.HealpixSlicer(nside=nside, latCol=altCol, latLonDeg=True,
                                         lonCol=azCol, useCache=False)
    unislicer = slicers.UniSlicer()
    for sql in sqls:
        # Number of exposures
        metric = metrics.CountMetric(mjdCol, metricName='N visits')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)
        metric = metrics.CountMetric(mjdCol, metricName='N visits alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql, plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)
        metric = metrics.MeanMetric(mjdCol, metricName='Mean Visit Time')
        bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
        bundleList.append(bundle)
        metric = metrics.MeanMetric(mjdCol, metricName='Mean Visit Time alt az')
        bundle = metricBundles.MetricBundle(metric, altaz_slicer, sql, plotFuncs=plotFuncs_lam)
        bundleList.append(bundle)
        metric = metrics.CountMetric(mjdCol, metricName='N_visits')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql)
        bundleList.append(bundle)
    # Need pairs in window to get a map of how well it gathered SS pairs.
    # Moon phase.
    metric = metrics.NChangesMetric(col='filter', metricName='Filter Changes')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.BruteOSFMetric()
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MeanMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MinMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    metric = metrics.MaxMetric('slewTime')
    bundle = metricBundles.MetricBundle(metric, unislicer, 'night=%i' % night)
    bundleList.append(bundle)
    # Make plots of the solar system pairs that were taken in the night
    metric = metrics.PairMetric(mjdCol=mjdCol)
    sql = 'night=%i and (filter ="r" or filter="g" or filter="i")' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)
    metric = metrics.PairMetric(mjdCol=mjdCol, metricName='z Pairs')
    sql = 'night=%i and filter="z"' % night
    bundle = metricBundles.MetricBundle(metric, reg_slicer, sql)
    bundleList.append(bundle)
    # Plot up each visit
    metric = metrics.NightPointingMetric(mjdCol=mjdCol)
    slicer = slicers.UniSlicer()
    sql = 'night=%i' % night
    plotFuncs = [plots.NightPointingPlotter()]
    bundle = metricBundles.MetricBundle(metric, slicer, sql, plotFuncs=plotFuncs)
    bundleList.append(bundle)
    # stats from the note column
    if notes:
        displayDict = {'group': 'Basic Stats', 'subgroup': 'Percent stats'}
        metric = metrics.StringCountMetric(col='note', percent=True, metricName='Percents')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql, displayDict=displayDict)
        bundleList.append(bundle)
        displayDict['subgroup'] = 'Count Stats'
        metric = metrics.StringCountMetric(col='note', metricName='Counts')
        bundle = metricBundles.MetricBundle(metric, unislicer, sql, displayDict=displayDict)
        bundleList.append(bundle)
    return metricBundles.makeBundlesDictFromList(bundleList)
def ResultadosNtotBolV2(FBS, mod):
    """Run the NtotMetricV2 metric over WFD and DDF regions for every opsim
    run of the given FBS release, writing results under a per-(FBS, mod)
    output directory."""
    # ==========================================================
    # Example values:
    # mod = "A"
    # FBS = "1.5"
    # modo = "A"
    # filtros_considerados = ["u","g"]  # f1,f2 such that f2 is redder than f1
    # ==========================================================
    # validacion(filtros_considerados)
    # f1,f2 = filtros_considerados
    # g_modA_LookupT_extension.pk
    # lookup_table = "{}_mod{}_LookupT_extension.pkl".format(f2, modo)
    #   -- must live in the /lookuptables folder under /essentials.
    #   f2 because that is the one used; f1 is only for a potential Lyman break.
    # filtros_modo = "{}_mod{}".format("".join(filtros_considerados),modo)
    print("FBS usado:", FBS)
    print("mod:", mod)
    #####################################################################################
    ################################## 3 BUNDLES ########################################
    #####################################################################################
    metric = NtotMetricV2(mod, f1f2diff=2)
    # ========================= WFD =================================
    # WFD region: everything whose 'note' is not a deep-drilling field.
    constraint1 = "note NOT LIKE '%DD%'"
    wfd_standard = schedUtils.WFD_no_gp_healpixels(
        64)  #, dec_max=2.5, dec_min=-62.5)
    # nside = 64; hpid = the subset of healpix ids used to calculate the metric.
    slicer1 = slicers.HealpixSubsetSlicer(
        64, np.where(wfd_standard == 1)[0]
    )
    bundle1 = mb.MetricBundle(metric, slicer1, constraint1)
    # ========================= DDF =================================
    constraint2 = "note LIKE '%DD%'"
    slicer2 = slicers.HealpixSlicer(nside=64)
    bundle2 = mb.MetricBundle(metric, slicer2, constraint2)
    print("==============================================")
    print("constraint WFD:" + constraint1)
    print("constraint DDF:" + constraint2)
    #####################################################################################
    ################################# DIRECTORIES #######################################
    #####################################################################################
    # Please enter your SciServer username between the single quotes below!
    # your_username = '******'
    # Check avaiable database directoies
    show_fbs_dirs()
    # if your_username == '':  # do NOT put your username here, put it in the cell at the top of the notebook.
    #     raise Exception('Please provide your username! See the top of the notebook.')
    dbDir = './lsst_cadence/FBS_{}/'.format(FBS)
    outDir = '/data/agonzalez/output_FBS_{}/bolNtot_mod{}_FINAL/'.format(
        FBS, mod)
    if not os.path.exists(os.path.abspath(outDir)):
        os.makedirs(os.path.abspath(outDir), exist_ok=True)
    opSimDbs, resultDbs = connect_dbs(dbDir, outDir)
    metricDataPath = '/data/agonzalez/output_FBS_{}/bolNtot_mod{}_FINAL/MetricData/'.format(
        FBS, mod)
    if not os.path.exists(os.path.abspath(metricDataPath)):
        os.makedirs(os.path.abspath(metricDataPath), exist_ok=True)
    print("===================================================")
    print("dbDir :", dbDir)
    print("outDir :", outDir)
    print("metricDataPath :", metricDataPath)
    print("===================================================")
    #####################################################################################
    ################################# BUNDLE GROUP ######################################
    #####################################################################################
    dbRuns = show_opsims(dbDir)
    print(dbRuns)
    # Skip the no-deep-drilling simulations.
    dbRuns = [x for x in dbRuns if "noddf" not in x]
    # archivo70plus = open("jhu70plus{}.txt".format(FBS),"r")
    # dbRuns = [x.rstrip() for x in list(archivo70plus)]
    # archivo70plus.close()
    # NOTE(review): the same two bundle objects are reused across runs, with
    # only the run name reset each iteration -- confirm results are not clobbered.
    for run in dbRuns:  #[70:]:
        bDict = {"WFD": bundle1, "DDF": bundle2}
        bundle1.setRunName(run)
        bundle2.setRunName(run)
        bgroup = mb.MetricBundleGroup(bDict, opSimDbs[run], metricDataPath,
                                      resultDbs[run])
        bgroup.runAll()
def scienceRadarBatch(colmap=None, runName='opsim', extraSql=None, extraMetadata=None, nside=64,
                      benchmarkArea=18000, benchmarkNvisits=825, DDF=True):
    """A batch of metrics for looking at survey performance relative to the SRD and the main
    science drivers of LSST.

    Parameters
    ----------
    colmap : dict or None, optional
        Mapping of standard column names to opsim database column names.
        If None, ``ColMapDict('fbs')`` is used.
    runName : str, optional
        Run name applied to every bundle via ``setRunName`` before returning.
    extraSql : str or None, optional
        Additional SQL constraint, combined (with ' and ') into the per-filter
        and per-section constraints. None is treated as ''.
    extraMetadata : str or None, optional
        Additional metadata string, combined into per-bundle metadata.
    nside : int, optional
        Healpix resolution for most of the sky slicers.
    benchmarkArea : float, optional
        Area (sq deg) passed through to ``fOBatch``.
    benchmarkNvisits : int, optional
        Benchmark visit count passed through to ``fOBatch``.
    DDF : bool, optional
        If True, also build the DDF depth and DDF transient sections
        (imports lsst.sims.featureScheduler lazily).

    Returns
    -------
    dict
        Dictionary of metric bundles from ``mb.makeBundlesDictFromList``.
    """
    # Hide dependencies: keep mafContrib optional unless this batch is actually used.
    from mafContrib.LSSObsStrategy.galaxyCountsMetric_extended import GalaxyCountsMetric_extended
    from mafContrib import Plasticc_metric, plasticc_slicer, load_plasticc_lc, TDEsAsciiMetric

    if colmap is None:
        colmap = ColMapDict('fbs')

    # Normalize extraSql and decide whether combined constraints need an ' and ' joiner.
    if extraSql is None:
        extraSql = ''
    if extraSql == '':
        joiner = ''
    else:
        joiner = ' and '

    bundleList = []

    # Get some standard per-filter coloring and sql constraints
    filterlist, colors, filterorders, filtersqls, filtermetadata = filterList(
        all=False, extraSql=extraSql, extraMetadata=extraMetadata)

    standardStats = standardSummary(withCount=False)

    healslicer = slicers.HealpixSlicer(nside=nside)
    subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # Load up the plastic light curves (PLAsTiCC templates used by the SN/KN sections below).
    models = ['SNIa-normal', 'KN']
    plasticc_models_dict = {}
    for model in models:
        plasticc_models_dict[model] = list(
            load_plasticc_lc(model=model).values())

    #########################
    # SRD, DM, etc
    #########################
    fOb = fOBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                  extraMetadata=extraMetadata, benchmarkArea=benchmarkArea,
                  benchmarkNvisits=benchmarkNvisits)
    astromb = astrometryBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                              extraMetadata=extraMetadata)
    rapidb = rapidRevisitBatch(runName=runName, colmap=colmap, extraSql=extraSql,
                               extraMetadata=extraMetadata)

    # loop through and modify the display dicts - set SRD as group and their previous 'group' as the subgroup
    temp_list = []
    for key in fOb:
        temp_list.append(fOb[key])
    for key in astromb:
        temp_list.append(astromb[key])
    for key in rapidb:
        temp_list.append(rapidb[key])
    for metricb in temp_list:
        metricb.displayDict['subgroup'] = metricb.displayDict['group'].replace(
            'SRD',
            '').lstrip(' ')
        metricb.displayDict['group'] = 'SRD'
    bundleList.extend(temp_list)

    # Year coverage: number of distinct years with observations per healpixel, per filter.
    displayDict = {
        'group': 'SRD',
        'subgroup': 'Year Coverage',
        'order': 0,
        'caption': 'Number of years with observations.'
    }
    slicer = slicers.HealpixSlicer(nside=nside)
    metric = metrics.YearCoverageMetric()
    for f in filterlist:
        plotDict = {'colorMin': 7, 'colorMax': 10, 'color': colors[f]}
        summary = [
            metrics.AreaSummaryMetric(area=18000,
                                      reduce_func=np.mean,
                                      decreasing=True,
                                      metricName='N Seasons (18k) %s' % f)
        ]
        bundleList.append(
            mb.MetricBundle(metric,
                            slicer,
                            filtersqls[f],
                            plotDict=plotDict,
                            metadata=filtermetadata[f],
                            displayDict=displayDict,
                            summaryMetrics=summary))

    #########################
    # Solar System
    #########################
    # Generally, we need to run Solar System metrics separately; they're a multi-step process.

    #########################
    # Cosmology
    #########################
    displayDict = {
        'group': 'Cosmology',
        'subgroup': 'Galaxy Counts',
        'order': 0,
        'caption': None
    }
    plotDict = {'percentileClip': 95., 'nTicks': 5}
    sql = extraSql + joiner + 'filter="i"'
    metadata = combineMetadata(extraMetadata, 'i band')
    metric = GalaxyCountsMetric_extended(filterBand='i',
                                         redshiftBin='all',
                                         nside=nside)
    summary = [
        metrics.AreaSummaryMetric(area=18000,
                                  reduce_func=np.sum,
                                  decreasing=True,
                                  metricName='N Galaxies (18k)')
    ]
    summary.append(metrics.SumMetric(metricName='N Galaxies (all)'))
    # make sure slicer has cache off
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             plotDict=plotDict,
                             metadata=metadata,
                             displayDict=displayDict,
                             summaryMetrics=summary,
                             plotFuncs=subsetPlots)
    bundleList.append(bundle)
    displayDict['order'] += 1

    # let's put Type Ia SN in here
    displayDict['subgroup'] = 'SNe Ia'
    # XXX-- use the light curves from PLASTICC here
    displayDict['caption'] = 'Fraction of normal SNe Ia'
    sql = extraSql
    slicer = plasticc_slicer(plcs=plasticc_models_dict['SNIa-normal'],
                             seed=42,
                             badval=0)
    metric = Plasticc_metric(metricName='SNIa')
    # Set the maskval so that we count missing objects as zero.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    plotFuncs = [plots.HealpixSkyMap()]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)
    displayDict['order'] += 1

    displayDict['subgroup'] = 'Camera Rotator'
    displayDict[
        'caption'] = 'Kuiper statistic (0 is uniform, 1 is delta function) of the '
    slicer = slicers.HealpixSlicer(nside=nside)
    metric1 = metrics.KuiperMetric('rotSkyPos')
    metric2 = metrics.KuiperMetric('rotTelPos')
    for f in filterlist:
        for m in [metric1, metric2]:
            plotDict = {'color': colors[f]}
            displayDict['order'] = filterorders[f]
            # NOTE(review): '+=' keeps appending to the shared caption across
            # every filter/metric iteration, so later bundles get a caption that
            # accumulates all previous "<col> for visits in <f> band." fragments.
            # Presumably '=' (with the fixed prefix) was intended — confirm.
            displayDict['caption'] += f"{m.colname} for visits in {f} band."
            bundleList.append(
                mb.MetricBundle(m,
                                slicer,
                                filtersqls[f],
                                plotDict=plotDict,
                                displayDict=displayDict,
                                summaryMetrics=standardStats,
                                plotFuncs=subsetPlots))

    # XXX--need some sort of metric for weak lensing

    #########################
    # Variables and Transients
    #########################
    displayDict = {
        'group': 'Variables/Transients',
        'subgroup': 'Periodic Stars',
        'order': 0,
        'caption': None
    }
    # Grid of period x magnitude; each point tests three periodicity amplitudes at once.
    for period in [
            0.5,
            1,
            2,
    ]:
        for magnitude in [21., 24.]:
            amplitudes = [0.05, 0.1, 1.0]
            periods = [period] * len(amplitudes)
            starMags = [magnitude] * len(amplitudes)

            plotDict = {
                'nTicks': 3,
                'colorMin': 0,
                'colorMax': 3,
                'xMin': 0,
                'xMax': 3
            }
            metadata = combineMetadata(
                'P_%.1f_Mag_%.0f_Amp_0.05-0.1-1' % (period, magnitude),
                extraMetadata)
            sql = None
            displayDict['caption'] = 'Metric evaluates if a periodic signal of period %.1f days could ' \
                'be detected for an r=%i star. A variety of amplitudes of periodicity ' \
                'are tested: [1, 0.1, and 0.05] mag amplitudes, which correspond to ' \
                'metric values of [1, 2, or 3]. ' % (period, magnitude)
            metric = metrics.PeriodicDetectMetric(periods=periods,
                                                  starMags=starMags,
                                                  amplitudes=amplitudes,
                                                  metricName='PeriodDetection')
            bundle = mb.MetricBundle(metric,
                                     healslicer,
                                     sql,
                                     metadata=metadata,
                                     displayDict=displayDict,
                                     plotDict=plotDict,
                                     plotFuncs=subsetPlots,
                                     summaryMetrics=standardStats)
            bundleList.append(bundle)
            displayDict['order'] += 1

    # XXX add some PLASTICC metrics for kilovnova and tidal disruption events.
    displayDict['subgroup'] = 'KN'
    displayDict['caption'] = 'Fraction of Kilonova (from PLASTICC)'
    displayDict['order'] = 0
    slicer = plasticc_slicer(plcs=plasticc_models_dict['KN'], seed=43, badval=0)
    metric = Plasticc_metric(metricName='KN')
    plotFuncs = [plots.HealpixSkyMap()]
    # maskVal=0 so missing objects count as zero in the mean.
    summary_stats = [metrics.MeanMetric(maskVal=0)]
    bundle = mb.MetricBundle(metric,
                             slicer,
                             extraSql,
                             runName=runName,
                             summaryMetrics=summary_stats,
                             plotFuncs=plotFuncs,
                             metadata=extraMetadata,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # Tidal Disruption Events
    displayDict['subgroup'] = 'TDE'
    displayDict[
        'caption'] = 'Fraction of TDE lightcurves that could be identified, outside of DD fields'
    detectSNR = {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5}

    # light curve parameters
    epochStart = -22
    peakEpoch = 0
    nearPeakT = 10
    postPeakT = 14  # two weeks
    nPhaseCheck = 1

    # condition parameters
    nObsTotal = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nObsPrePeak = 1
    nObsNearPeak = {'u': 0, 'g': 0, 'r': 0, 'i': 0, 'z': 0, 'y': 0}
    nFiltersNearPeak = 3
    nObsPostPeak = 0
    nFiltersPostPeak = 2

    metric = TDEsAsciiMetric(asciifile=None,
                             detectSNR=detectSNR,
                             epochStart=epochStart,
                             peakEpoch=peakEpoch,
                             nearPeakT=nearPeakT,
                             postPeakT=postPeakT,
                             nPhaseCheck=nPhaseCheck,
                             nObsTotal=nObsTotal,
                             nObsPrePeak=nObsPrePeak,
                             nObsNearPeak=nObsNearPeak,
                             nFiltersNearPeak=nFiltersNearPeak,
                             nObsPostPeak=nObsPostPeak,
                             nFiltersPostPeak=nFiltersPostPeak)
    slicer = slicers.HealpixSlicer(nside=32)
    sql = extraSql + joiner + "note not like '%DD%'"
    md = extraMetadata
    # NOTE(review): the None branch uses " NonDD" (leading space) while the
    # else-branch appends 'NonDD' with no separator — confirm the intended
    # metadata formatting is consistent.
    if md is None:
        md = " NonDD"
    else:
        md += 'NonDD'
    bundle = mb.MetricBundle(metric,
                             slicer,
                             sql,
                             runName=runName,
                             summaryMetrics=standardStats,
                             plotFuncs=plotFuncs,
                             metadata=md,
                             displayDict=displayDict)
    bundleList.append(bundle)

    # XXX -- would be good to add some microlensing events, for both MW and LMC/SMC.

    #########################
    # Milky Way
    #########################
    displayDict = {'group': 'Milky Way', 'subgroup': ''}

    displayDict['subgroup'] = 'N stars'
    slicer = slicers.HealpixSlicer(nside=nside, useCache=False)
    sum_stats = [metrics.SumMetric(metricName='Total N Stars')]
    for f in filterlist:
        displayDict['order'] = filterorders[f]
        displayDict['caption'] = 'Number of stars in %s band with an measurement error due to crowding ' \
            'of less than 0.1 mag' % f
        # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding
        metric = metrics.NstarsMetric(crowding_error=0.1,
                                      filtername='r',
                                      seeingCol=colmap['seeingGeom'],
                                      m5Col=colmap['fiveSigmaDepth'])
        plotDict = {'nTicks': 5, 'logScale': True, 'colorMin': 100}
        bundle = mb.MetricBundle(metric,
                                 slicer,
                                 filtersqls[f],
                                 runName=runName,
                                 summaryMetrics=sum_stats,
                                 plotFuncs=subsetPlots,
                                 plotDict=plotDict,
                                 displayDict=displayDict)
        bundleList.append(bundle)

    #########################
    # DDF
    #########################
    if DDF:
        # Hide this import to avoid adding a dependency.
        from lsst.sims.featureScheduler.surveys import generate_dd_surveys, Deep_drilling_survey
        ddf_surveys = generate_dd_surveys()

        # Add on the Euclid fields
        # XXX--to update. Should have a spot where all the DDF locations are stored.
        ddf_surveys.append(
            Deep_drilling_survey([], 58.97, -49.28, survey_name='DD:EDFSa'))
        ddf_surveys.append(
            Deep_drilling_survey([], 63.6, -47.60, survey_name='DD:EDFSb'))

        # For doing a high-res sampling of the DDF for co-adds
        ddf_radius = 1.8  # Degrees
        ddf_nside = 512

        ra, dec = hpid2RaDec(ddf_nside, np.arange(hp.nside2npix(ddf_nside)))

        displayDict = {'group': 'DDF depths', 'subgroup': None}

        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            # Crop off the u-band only DDF
            if survey.survey_name[0:4] != 'DD:u':
                # Select the high-res healpixels within ddf_radius of the DDF center.
                # NOTE(review): survey.ra/dec are converted with np.degrees here,
                # so they are presumably stored in radians — confirm.
                dist_to_ddf = angularSeparation(ra, dec, np.degrees(survey.ra),
                                                np.degrees(survey.dec))
                goodhp = np.where(dist_to_ddf <= ddf_radius)
                slicer = slicers.UserPointsSlicer(ra=ra[goodhp],
                                                  dec=dec[goodhp],
                                                  useCamera=False)
                for f in filterlist:
                    metric = metrics.Coaddm5Metric(
                        metricName=survey.survey_name + ', ' + f)
                    summary = [
                        metrics.MedianMetric(metricName='Median depth ' +
                                             survey.survey_name + ', ' + f)
                    ]
                    plotDict = {'color': colors[f]}
                    sql = filtersqls[f]
                    displayDict['order'] = filterorders[f]
                    displayDict['caption'] = 'Coadded m5 depth in %s band.' % (
                        f)
                    bundle = mb.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             metadata=filtermetadata[f],
                                             displayDict=displayDict,
                                             summaryMetrics=summary,
                                             plotFuncs=[],
                                             plotDict=plotDict)
                    bundleList.append(bundle)

        displayDict = {'group': 'DDF Transients', 'subgroup': None}
        for survey in ddf_surveys:
            displayDict['subgroup'] = survey.survey_name
            if survey.survey_name[0:4] != 'DD:u':
                slicer = plasticc_slicer(
                    plcs=plasticc_models_dict['SNIa-normal'],
                    seed=42,
                    ra_cen=survey.ra,
                    dec_cen=survey.dec,
                    radius=np.radians(3.),
                    useCamera=False)
                metric = Plasticc_metric(metricName=survey.survey_name +
                                         ' SNIa')
                sql = extraSql
                summary_stats = [metrics.MeanMetric(maskVal=0)]
                plotFuncs = [plots.HealpixSkyMap()]
                bundle = mb.MetricBundle(metric,
                                         slicer,
                                         sql,
                                         runName=runName,
                                         summaryMetrics=summary_stats,
                                         plotFuncs=plotFuncs,
                                         metadata=extraMetadata,
                                         displayDict=displayDict)
                bundleList.append(bundle)
                displayDict['order'] = 10

    # Set the runName for all bundles and return the bundleDict.
    for b in bundleList:
        b.setRunName(runName)
    bundleDict = mb.makeBundlesDictFromList(bundleList)
    return bundleDict
def makeBundleList(dbFile, runName=None, nside=64, benchmark='design', lonCol='fieldRA', latCol='fieldDec', seeingCol='seeingFwhmGeom'): """ make a list of metricBundle objects to look at the scientific performance of an opsim run. """ # List to hold everything we're going to make bundleList = [] # List to hold metrics that shouldn't be saved noSaveBundleList = [] # Connect to the databse opsimdb = db.OpsimDatabaseV4(dbFile) if runName is None: runName = os.path.basename(dbFile).replace('_sqlite.db', '') # Fetch the proposal ID values from the database propids, propTags = opsimdb.fetchPropInfo() # Fetch the telescope location from config lat, lon, height = opsimdb.fetchLatLonHeight() # Add metadata regarding dithering/non-dithered. commonname = ''.join([a for a in lonCol if a in latCol]) if commonname == 'field': slicermetadata = ' (non-dithered)' else: slicermetadata = ' (%s)' % (commonname) # Construct a WFD SQL where clause so multiple propIDs can query by WFD: wfdWhere = opsimdb.createSQLWhere('WFD', propTags) print('#FYI: WFD "where" clause: %s' % (wfdWhere)) ddWhere = opsimdb.createSQLWhere('DD', propTags) print('#FYI: DD "where" clause: %s' % (ddWhere)) # Set up benchmark values, scaled to length of opsim run. runLength = opsimdb.fetchRunLength() if benchmark == 'requested': # Fetch design values for seeing/skybrightness/single visit depth. benchmarkVals = utils.scaleBenchmarks(runLength, benchmark='design') # Update nvisits with requested visits from config files. benchmarkVals['nvisits'] = opsimdb.fetchRequestedNvisits(propId=propTags['WFD']) # Calculate expected coadded depth. benchmarkVals['coaddedDepth'] = utils.calcCoaddedDepth(benchmarkVals['nvisits'], benchmarkVals['singleVisitDepth']) elif (benchmark == 'stretch') or (benchmark == 'design'): # Calculate benchmarks for stretch or design. 
benchmarkVals = utils.scaleBenchmarks(runLength, benchmark=benchmark) benchmarkVals['coaddedDepth'] = utils.calcCoaddedDepth(benchmarkVals['nvisits'], benchmarkVals['singleVisitDepth']) else: raise ValueError('Could not recognize benchmark value %s, use design, stretch or requested.' % (benchmark)) # Check that nvisits is not set to zero (for very short run length). for f in benchmarkVals['nvisits']: if benchmarkVals['nvisits'][f] == 0: print('Updating benchmark nvisits value in %s to be nonzero' % (f)) benchmarkVals['nvisits'][f] = 1 # Set values for min/max range of nvisits for All/WFD and DD plots. These are somewhat arbitrary. nvisitsRange = {} nvisitsRange['all'] = {'u': [20, 80], 'g': [50, 150], 'r': [100, 250], 'i': [100, 250], 'z': [100, 300], 'y': [100, 300]} nvisitsRange['DD'] = {'u': [6000, 10000], 'g': [2500, 5000], 'r': [5000, 8000], 'i': [5000, 8000], 'z': [7000, 10000], 'y': [5000, 8000]} # Scale these ranges for the runLength. scale = runLength / 10.0 for prop in nvisitsRange: for f in nvisitsRange[prop]: for i in [0, 1]: nvisitsRange[prop][f][i] = int(np.floor(nvisitsRange[prop][f][i] * scale)) # Filter list, and map of colors (for plots) to filters. 
filters = ['u', 'g', 'r', 'i', 'z', 'y'] colors = {'u': 'cyan', 'g': 'g', 'r': 'y', 'i': 'r', 'z': 'm', 'y': 'k'} filtorder = {'u': 1, 'g': 2, 'r': 3, 'i': 4, 'z': 5, 'y': 6} # Easy way to run through all fi # Set up a list of common summary stats commonSummary = [metrics.MeanMetric(), metrics.RobustRmsMetric(), metrics.MedianMetric(), metrics.PercentileMetric(metricName='25th%ile', percentile=25), metrics.PercentileMetric(metricName='75th%ile', percentile=75), metrics.MinMetric(), metrics.MaxMetric()] allStats = commonSummary # Set up some 'group' labels reqgroup = 'A: Required SRD metrics' depthgroup = 'B: Depth per filter' uniformitygroup = 'C: Uniformity' airmassgroup = 'D: Airmass distribution' seeinggroup = 'E: Seeing distribution' transgroup = 'F: Transients' sngroup = 'G: SN Ia' altAzGroup = 'H: Alt Az' rangeGroup = 'I: Range of Dates' intergroup = 'J: Inter-Night' phaseGroup = 'K: Max Phase Gap' NEOGroup = 'L: NEO Detection' # Set up an object to track the metricBundles that we want to combine into merged plots. mergedHistDict = {} # Set the histogram merge function. mergeFunc = plots.HealpixHistogram() keys = ['NVisits', 'coaddm5', 'NormEffTime', 'Minseeing', 'seeingAboveLimit', 'minAirmass', 'fracAboveAirmass'] for key in keys: mergedHistDict[key] = plots.PlotBundle(plotFunc=mergeFunc) ## # Calculate the fO metrics for all proposals and WFD only. order = 0 for prop in ('All prop', 'WFD only'): if prop == 'All prop': metadata = 'All Visits' + slicermetadata sqlconstraint = '' if prop == 'WFD only': metadata = 'WFD only' + slicermetadata sqlconstraint = '%s' % (wfdWhere) # Configure the count metric which is what is used for f0 slicer. 
m1 = metrics.CountMetric(col='observationStartMJD', metricName='fO') plotDict = {'xlabel': 'Number of Visits', 'Asky': benchmarkVals['Area'], 'Nvisit': benchmarkVals['nvisitsTotal'], 'xMin': 0, 'xMax': 1500} summaryMetrics = [metrics.fOArea(nside=nside, norm=False, metricName='fOArea: Nvisits (#)', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']), metrics.fOArea(nside=nside, norm=True, metricName='fOArea: Nvisits/benchmark', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']), metrics.fONv(nside=nside, norm=False, metricName='fONv: Area (sqdeg)', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal']), metrics.fONv(nside=nside, norm=True, metricName='fONv: Area/benchmark', Asky=benchmarkVals['Area'], Nvisit=benchmarkVals['nvisitsTotal'])] caption = 'The FO metric evaluates the overall efficiency of observing. ' caption += ('fOArea: Nvisits = %.1f sq degrees receive at least this many visits out of %d. ' % (benchmarkVals['Area'], benchmarkVals['nvisitsTotal'])) caption += ('fONv: Area = this many square degrees out of %.1f receive at least %d visits.' % (benchmarkVals['Area'], benchmarkVals['nvisitsTotal'])) displayDict = {'group': reqgroup, 'subgroup': 'F0', 'displayOrder': order, 'caption': caption} order += 1 slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryMetrics, plotFuncs=[plots.FOPlot()], runName=runName, metadata=metadata) bundleList.append(bundle) ### # Calculate the Rapid Revisit Metrics. order = 0 metadata = 'All Visits' + slicermetadata sqlconstraint = '' dTmin = 40.0 # seconds dTmax = 30.0*60. 
# seconds minNvisit = 100 pixArea = float(hp.nside2pixarea(nside, degrees=True)) scale = pixArea * hp.nside2npix(nside) cutoff1 = 0.15 extraStats1 = [metrics.FracBelowMetric(cutoff=cutoff1, scale=scale, metricName='Area (sq deg)')] extraStats1.extend(commonSummary) slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) m1 = metrics.RapidRevisitMetric(metricName='RapidRevisitUniformity', dTmin=dTmin / 60.0 / 60.0 / 24.0, dTmax=dTmax / 60.0 / 60.0 / 24.0, minNvisits=minNvisit) plotDict = {'xMin': 0, 'xMax': 1} summaryStats = extraStats1 caption = 'Deviation from uniformity for short revisit timescales, between %s and %s seconds, ' % ( dTmin, dTmax) caption += 'for pointings with at least %d visits in this time range. ' % (minNvisit) caption += 'Summary statistic "Area" below indicates the area on the sky which has a ' caption += 'deviation from uniformity of < %.2f.' % (cutoff1) displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order, 'caption': caption} bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 dTmax = dTmax/60.0 # need time in minutes for Nrevisits metric m2 = metrics.NRevisitsMetric(dT=dTmax) plotDict = {'xMin': 0.1, 'xMax': 2000, 'logScale': True} cutoff2 = 800 extraStats2 = [metrics.FracAboveMetric(cutoff=cutoff2, scale=scale, metricName='Area (sq deg)')] extraStats2.extend(commonSummary) caption = 'Number of consecutive visits with return times faster than %.1f minutes, ' % (dTmax) caption += 'in any filter, all proposals. ' caption += 'Summary statistic "Area" below indicates the area on the sky which has more than ' caption += '%d revisits within this time window.' 
% (cutoff2) summaryStats = extraStats2 displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order, 'caption': caption} bundle = metricBundles.MetricBundle(m2, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 m3 = metrics.NRevisitsMetric(dT=dTmax, normed=True) plotDict = {'xMin': 0, 'xMax': 1, 'cbarFormat': '%.1f'} cutoff3 = 0.6 extraStats3 = [metrics.FracAboveMetric(cutoff=cutoff3, scale=scale, metricName='Area (sq deg)')] extraStats3.extend(commonSummary) summaryStats = extraStats3 caption = 'Fraction of total visits where consecutive visits have return times faster ' caption += 'than %.1f minutes, in any filter, all proposals. ' % (dTmax) caption += 'Summary statistic "Area" below indicates the area on the sky which has more ' caption += 'than %d revisits within this time window.' % (cutoff3) displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'displayOrder': order, 'caption': caption} bundle = metricBundles.MetricBundle(m3, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 # And add a histogram of the time between quick revisits. binMin = 0 binMax = 120. binsize = 3. bins_metric = np.arange(binMin / 60.0 / 24.0, (binMax + binsize) / 60. / 24., binsize / 60. / 24.) bins_plot = bins_metric * 24.0 * 60.0 m1 = metrics.TgapsMetric(bins=bins_metric, metricName='dT visits') plotDict = {'bins': bins_plot, 'xlabel': 'dT (minutes)'} caption = ('Histogram of the time between consecutive revisits (<%.1f minutes), over entire sky.' 
% (binMax)) displayDict = {'group': reqgroup, 'subgroup': 'Rapid Revisit', 'order': order, 'caption': caption} slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) plotFunc = plots.SummaryHistogram() bundle = metricBundles.MetricBundle(m1, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, plotFuncs=[plotFunc]) bundleList.append(bundle) order += 1 ## # Trigonometric parallax and proper motion @ r=20 and r=24 slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) sqlconstraint = '' order = 0 metric = metrics.ParallaxMetric(metricName='Parallax 20', rmag=20, seeingCol=seeingCol) summaryStats = allStats plotDict = {'cbarFormat': '%.1f', 'xMin': 0, 'xMax': 3} displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': 'Parallax precision at r=20. (without refraction).'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxMetric(metricName='Parallax 24', rmag=24, seeingCol=seeingCol) plotDict = {'cbarFormat': '%.1f', 'xMin': 0, 'xMax': 10} displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': 'Parallax precision at r=24. 
(without refraction).'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxMetric(metricName='Parallax Normed', rmag=24, normalize=True, seeingCol=seeingCol) plotDict = {'xMin': 0.5, 'xMax': 1.0} displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': 'Normalized parallax (normalized to optimum observation cadence, 1=optimal).'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage 20', rmag=20, seeingCol=seeingCol) plotDict = {} caption = "Parallax factor coverage for an r=20 star (0 is bad, 0.5-1 is good). " caption += "One expects the parallax factor coverage to vary because stars on the ecliptic " caption += "can be observed when they have no parallax offset while stars at the pole are always " caption += "offset by the full parallax offset.""" displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxCoverageMetric(metricName='Parallax Coverage 24', rmag=24, seeingCol=seeingCol) plotDict = {} caption = "Parallax factor coverage for an r=24 star (0 is bad, 0.5-1 is good). 
" caption += "One expects the parallax factor coverage to vary because stars on the ecliptic " caption += "can be observed when they have no parallax offset while stars at the pole are always " caption += "offset by the full parallax offset.""" displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy 20', rmag=20, seeingCol=seeingCol) plotDict = {} caption = 'Correlation between parallax offset magnitude and hour angle an r=20 star.' caption += ' (0 is good, near -1 or 1 is bad).' displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ParallaxDcrDegenMetric(metricName='Parallax-DCR degeneracy 24', rmag=24, seeingCol=seeingCol) plotDict = {} caption = 'Correlation between parallax offset magnitude and hour angle an r=24 star.' caption += ' (0 is good, near -1 or 1 is bad).' 
displayDict = {'group': reqgroup, 'subgroup': 'Parallax', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ProperMotionMetric(metricName='Proper Motion 20', rmag=20, seeingCol=seeingCol) summaryStats = allStats plotDict = {'xMin': 0, 'xMax': 3} displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order, 'caption': 'Proper Motion precision at r=20.'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ProperMotionMetric(rmag=24, metricName='Proper Motion 24', seeingCol=seeingCol) summaryStats = allStats plotDict = {'xMin': 0, 'xMax': 10} displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order, 'caption': 'Proper Motion precision at r=24.'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 metric = metrics.ProperMotionMetric(rmag=24, normalize=True, metricName='Proper Motion Normed', seeingCol=seeingCol) plotDict = {'xMin': 0.2, 'xMax': 0.7} caption = 'Normalized proper motion at r=24. ' caption += '(normalized to optimum observation cadence - start/end. 1=optimal).' displayDict = {'group': reqgroup, 'subgroup': 'Proper Motion', 'order': order, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, summaryMetrics=summaryStats, runName=runName, metadata=metadata) bundleList.append(bundle) order += 1 ## # Calculate the time uniformity in each filter, for each year. 
order = 0 slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) plotFuncs = [plots.TwoDMap()] step = 0.5 bins = np.arange(0, 365.25 * 10 + 40, 40) - step metric = metrics.AccumulateUniformityMetric(bins=bins) plotDict = {'xlabel': 'Night (days)', 'xextent': [bins.min( ) + step, bins.max() + step], 'cbarTitle': 'Uniformity'} for f in filters: sqlconstraint = 'filter = "%s"' % (f) caption = 'Deviation from uniformity in %s band. ' % f caption += 'Northern Healpixels are at the top of the image.' caption += '(0=perfectly uniform, 1=perfectly nonuniform).' displayDict = {'group': uniformitygroup, 'subgroup': 'per night', 'order': filtorder[f], 'caption': caption} metadata = '%s band' % (f) + slicermetadata bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, plotFuncs=plotFuncs) noSaveBundleList.append(bundle) ## # Depth metrics. slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) for f in filters: propCaption = '%s band, all proposals %s' % (f, slicermetadata) sqlconstraint = 'filter = "%s"' % (f) metadata = '%s band' % (f) + slicermetadata # Number of visits. metric = metrics.CountMetric(col='observationStartMJD', metricName='NVisits') plotDict = {'xlabel': 'Number of visits', 'xMin': nvisitsRange['all'][f][0], 'xMax': nvisitsRange['all'][f][1], 'colorMin': nvisitsRange['all'][f][0], 'colorMax': nvisitsRange['all'][f][1], 'binsize': 5, 'logScale': True, 'nTicks': 4, 'colorMin': 1} summaryStats = allStats displayDict = {'group': depthgroup, 'subgroup': 'Nvisits', 'order': filtorder[f], 'caption': 'Number of visits in filter %s, %s.' 
% (f, propCaption)} histMerge = {'color': colors[f], 'label': '%s' % (f), 'binsize': 5, 'xMin': nvisitsRange['all'][f][0], 'xMax': nvisitsRange['all'][f][1], 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['NVisits'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # Coadded depth. metric = metrics.Coaddm5Metric() plotDict = {'zp': benchmarkVals['coaddedDepth'][f], 'xMin': -0.8, 'xMax': 0.8, 'xlabel': 'coadded m5 - %.1f' % benchmarkVals['coaddedDepth'][f]} summaryStats = allStats histMerge = {'legendloc': 'upper right', 'color': colors[f], 'label': '%s' % f, 'binsize': .02, 'xlabel': 'coadded m5 - benchmark value'} caption = ('Coadded depth in filter %s, with %s value subtracted (%.1f), %s. ' % (f, benchmark, benchmarkVals['coaddedDepth'][f], propCaption)) caption += 'More positive numbers indicate fainter limiting magnitudes.' displayDict = {'group': depthgroup, 'subgroup': 'Coadded Depth', 'order': filtorder[f], 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['coaddm5'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # Effective time. metric = metrics.TeffMetric(metricName='Normalized Effective Time', normed=True, fiducialDepth=benchmarkVals['singleVisitDepth']) plotDict = {'xMin': 0.1, 'xMax': 1.1} summaryStats = allStats histMerge = {'legendLoc': 'upper right', 'color': colors[f], 'label': '%s' % f, 'binsize': 0.02} caption = ('"Time Effective" in filter %s, calculated with fiducial single-visit depth of %s mag. ' % (f, benchmarkVals['singleVisitDepth'][f])) caption += 'Normalized by the fiducial time effective, if every observation was at ' caption += 'the fiducial depth.' 
displayDict = {'group': depthgroup, 'subgroup': 'Time Eff.', 'order': filtorder[f], 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['NormEffTime'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # Put in a z=0.5 Type Ia SN, based on Cambridge 2015 workshop notebook. # Check for 1) detection in any band, 2) detection on the rise in any band, # 3) good characterization peaks = {'uPeak': 25.9, 'gPeak': 23.6, 'rPeak': 22.6, 'iPeak': 22.7, 'zPeak': 22.7, 'yPeak': 22.8} peakTime = 15. transDuration = peakTime + 30. # Days metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNDetection', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected in any filter' displayDict = {'group': transgroup, 'subgroup': 'Detected', 'caption': caption} sqlconstraint = '' metadata = '' + slicermetadata plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, nPrePeak=1, metricName='SNAlert', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected pre-peak in any filter' displayDict = {'group': transgroup, 'subgroup': 'Detected on the rise', 'caption': caption} plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. 
/ peakTime, declineSlope=1.4 / 30., transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNLots', nFilters=3, nPrePeak=3, nPerLC=2, **peaks) caption = 'Fraction of z=0.5 type Ia SN that are observed 6 times, 3 pre-peak, ' caption += '3 post-peak, with observations in 3 filters' displayDict = {'group': transgroup, 'subgroup': 'Well observed', 'caption': caption} sqlconstraint = 'filter="r" or filter="g" or filter="i" or filter="z" ' plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata) bundleList.append(bundle) # Good seeing in r/i band metrics, including in first/second years. order = 0 for tcolor, tlabel, timespan in zip(['k', 'g', 'r'], ['10 years', '1 year', '2 years'], ['', ' and night<=365', ' and night<=730']): order += 1 for f in (['r', 'i']): sqlconstraint = 'filter = "%s" %s' % (f, timespan) propCaption = '%s band, all proposals %s, over %s.' % (f, slicermetadata, tlabel) metadata = '%s band, %s' % (f, tlabel) + slicermetadata seeing_limit = 0.7 airmass_limit = 1.2 metric = metrics.MinMetric(col=seeingCol) summaryStats = allStats plotDict = {'xMin': 0.35, 'xMax': 1.5, 'color': tcolor} displayDict = {'group': seeinggroup, 'subgroup': 'Best Seeing', 'order': filtorder[f] * 100 + order, 'caption': 'Minimum FWHMgeom values in %s.' 
% (propCaption)} histMerge = {'label': '%s %s' % (f, tlabel), 'color': tcolor, 'binsize': 0.03, 'xMin': 0.35, 'xMax': 1.5, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['Minseeing'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) metric = metrics.FracAboveMetric(col=seeingCol, cutoff=seeing_limit) summaryStats = allStats plotDict = {'xMin': 0, 'xMax': 1.1, 'color': tcolor} displayDict = {'group': seeinggroup, 'subgroup': 'Good seeing fraction', 'order': filtorder[f] * 100 + order, 'caption': 'Fraction of total images with FWHMgeom worse than %.1f, in %s' % (seeing_limit, propCaption)} histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel), 'binsize': 0.05, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['seeingAboveLimit'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) metric = metrics.MinMetric(col='airmass') plotDict = {'xMin': 1, 'xMax': 1.5, 'color': tcolor} summaryStats = allStats displayDict = {'group': airmassgroup, 'subgroup': 'Best Airmass', 'order': filtorder[f] * 100 + order, 'caption': 'Minimum airmass in %s.' 
% (propCaption)} histMerge = {'color': tcolor, 'label': '%s %s' % (f, tlabel), 'binsize': 0.03, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['minAirmass'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) metric = metrics.FracAboveMetric(col='airmass', cutoff=airmass_limit) plotDict = {'xMin': 0, 'xMax': 1, 'color': tcolor} summaryStats = allStats displayDict = {'group': airmassgroup, 'subgroup': 'Low airmass fraction', 'order': filtorder[f] * 100 + order, 'caption': 'Fraction of total images with airmass higher than %.2f, in %s' % (airmass_limit, propCaption)} histMerge = {'color': tcolor, 'label': '%s %s' % ( f, tlabel), 'binsize': 0.05, 'legendloc': 'upper right'} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName, metadata=metadata, summaryMetrics=summaryStats) mergedHistDict['fracAboveAirmass'].addBundle(bundle, plotDict=histMerge) bundleList.append(bundle) # SNe metrics from UK workshop. peaks = {'uPeak': 25.9, 'gPeak': 23.6, 'rPeak': 22.6, 'iPeak': 22.7, 'zPeak': 22.7, 'yPeak': 22.8} peakTime = 15. transDuration = peakTime + 30. # Days metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNDetection', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected at any point in their light curve in any filter' displayDict = {'group': sngroup, 'subgroup': 'Detected', 'caption': caption} sqlconstraint = '' plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. 
/ peakTime, declineSlope=1.4 / 30.0, transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, nPrePeak=1, metricName='SNAlert', **peaks) caption = 'Fraction of z=0.5 type Ia SN that are detected pre-peak in any filter' displayDict = {'group': sngroup, 'subgroup': 'Detected on the rise', 'caption': caption} plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) metric = metrics.TransientMetric(riseSlope=-2. / peakTime, declineSlope=1.4 / 30., transDuration=transDuration, peakTime=peakTime, surveyDuration=runLength, metricName='SNLots', nFilters=3, nPrePeak=3, nPerLC=2, **peaks) caption = 'Fraction of z=0.5 type Ia SN that are observed 6 times, 3 pre-peak, ' caption += '3 post-peak, with observations in 3 filters' displayDict = {'group': sngroup, 'subgroup': 'Well observed', 'caption': caption} sqlconstraint = 'filter="r" or filter="g" or filter="i" or filter="z" ' plotDict = {} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) propIDOrderDict = {} orderVal = 100 for propID in propids: propIDOrderDict[propID] = orderVal orderVal += 100 # Full range of dates: metric = metrics.FullRangeMetric(col='observationStartMJD') plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] caption = 'Time span of survey.' 
sqlconstraint = '' plotDict = {} displayDict = {'group': rangeGroup, 'caption': caption} bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, plotDict=plotDict, displayDict=displayDict, runName=runName) bundleList.append(bundle) for f in filters: for propid in propids: displayDict = {'group': rangeGroup, 'subgroup': propids[propid], 'caption': caption, 'order': filtorder[f]} md = '%s, %s' % (f, propids[propid]) sql = 'filter="%s" and proposalId=%i' % (f, propid) bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict, metadata=md, plotFuncs=plotFuncs, displayDict=displayDict, runName=runName) bundleList.append(bundle) # Alt az plots slicer = slicers.HealpixSlicer(nside=64, latCol='zenithDistance', lonCol='azimuth', useCache=False) metric = metrics.CountMetric('observationStartMJD', metricName='Nvisits as function of Alt/Az') plotDict = {} plotFuncs = [plots.LambertSkyMap()] displayDict = {'group': altAzGroup, 'caption': 'Alt Az pointing distribution'} for f in filters: for propid in propids: displayDict = {'group': altAzGroup, 'subgroup': propids[propid], 'caption': 'Alt Az pointing distribution', 'order': filtorder[f]} md = '%s, %s' % (f, propids[propid]) sql = 'filter="%s" and proposalId=%i' % (f, propid) bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict, plotFuncs=plotFuncs, metadata=md, displayDict=displayDict, runName=runName) bundleList.append(bundle) sql = '' md = 'all observations' displayDict = {'group': altAzGroup, 'subgroup': 'All Observations', 'caption': 'Alt Az pointing distribution'} bundle = metricBundles.MetricBundle(metric, slicer, sql, plotDict=plotDict, plotFuncs=plotFuncs, metadata=md, displayDict=displayDict, runName=runName) bundleList.append(bundle) # Median inter-night gap (each and all filters) slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) metric = metrics.InterNightGapsMetric(metricName='Median Inter-Night Gap') sqls = ['filter = "%s"' % f for f in 
filters] orders = [filtorder[f] for f in filters] orders.append(0) sqls.append('') for sql, order in zip(sqls, orders): displayDict = {'group': intergroup, 'subgroup': 'Median Gap', 'caption': 'Median gap between days', 'order': order} bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, runName=runName) bundleList.append(bundle) # Max inter-night gap in r and all bands dslicer = slicers.HealpixSlicer(nside=nside, lonCol='ditheredRA', latCol='ditheredDec') metric = metrics.InterNightGapsMetric(metricName='Max Inter-Night Gap', reduceFunc=np.max) plotDict = {'percentileClip': 95.} for sql, order in zip(sqls, orders): displayDict = {'group': intergroup, 'subgroup': 'Max Gap', 'caption': 'Max gap between nights', 'order': order} bundle = metricBundles.MetricBundle(metric, dslicer, sql, displayDict=displayDict, plotDict=plotDict, runName=runName) bundleList.append(bundle) # largest phase gap for periods periods = [0.1, 1.0, 10., 100.] sqls = {'u': 'filter = "u"', 'r': 'filter="r"', 'g,r,i,z': 'filter="g" or filter="r" or filter="i" or filter="z"', 'all': ''} for sql in sqls: for period in periods: displayDict = {'group': phaseGroup, 'subgroup': 'period=%.2f days, filter=%s' % (period, sql), 'caption': 'Maximum phase gaps'} metric = metrics.PhaseGapMetric(nPeriods=1, periodMin=period, periodMax=period, metricName='PhaseGap, %.1f' % period) bundle = metricBundles.MetricBundle(metric, slicer, sqls[sql], displayDict=displayDict, runName=runName) bundleList.append(bundle) # NEO XY plots slicer = slicers.UniSlicer() metric = metrics.PassMetric(metricName='NEODistances') stacker = stackers.NEODistStacker() stacker2 = stackers.EclipticStacker() for f in filters: plotFunc = plots.NeoDistancePlotter(eclipMax=10., eclipMin=-10.) caption = 'Observations within 10 degrees of the ecliptic. 
Distance an H=22 NEO would be detected' displayDict = {'group': NEOGroup, 'subgroup': 'xy', 'order': filtorder[f], 'caption': caption} plotDict = {} sqlconstraint = 'filter = "%s"' % (f) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, displayDict=displayDict, stackerList=[stacker, stacker2], plotDict=plotDict, plotFuncs=[plotFunc]) noSaveBundleList.append(bundle) # Solar elongation sqls = ['filter = "%s"' % f for f in filters] orders = [filtorder[f] for f in filters] sqls.append('') orders.append(0) for sql, order in zip(sqls, orders): plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': NEOGroup, 'subgroup': 'Solar Elongation', 'caption': 'Median solar elongation in degrees', 'order': order} metric = metrics.MedianMetric('solarElong') slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, plotFuncs=plotFuncs) bundleList.append(bundle) plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] displayDict = {'group': NEOGroup, 'subgroup': 'Solar Elongation', 'caption': 'Minimum solar elongation in degrees', 'order': order} metric = metrics.MinMetric('solarElong') slicer = slicers.HealpixSlicer(nside=nside, lonCol=lonCol, latCol=latCol) bundle = metricBundles.MetricBundle(metric, slicer, sql, displayDict=displayDict, plotFuncs=plotFuncs) bundleList.append(bundle) return (metricBundles.makeBundlesDictFromList(bundleList), mergedHistDict, metricBundles.makeBundlesDictFromList(noSaveBundleList))
def go(nside=64, rmag=21., SedTemplate='flat', DoRun=False, LFilters = [], \ LNightMax=[], nightMax=1e4, \ CustomPlotLimits=True, \ RunOne=False, MaxRuns=1e3, \ SpatialClip=95., \ seeingCol='FWHMeff', \ sCmap='cubehelix_r', \ checkCorrKind=False, \ wfdPlane=True, \ useGRIZ=False): # Go to the directory where the sqlite databases are held... # cd /Users/clarkson/Data/LSST/OpSimRuns/opsim20160411 # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits # Break the specifications across lines to make subdivision easier # Subsets by time first, then by filter, finally the whole shebang # 2016-04-23 - replaced enigma_1189 --> minion_1016 # 2016-04-23 - replaced ops2_1092 --> minion_1020 # (Yes the inversion of the first two is deliberate.) runNames = ['minion_1016', 'minion_1020', 'minion_1020', 'minion_1016', \ 'minion_1020', 'minion_1016', 'minion_1020', 'minion_1016', \ 'minion_1020', 'minion_1016'] LFilters = ['', '', '', '', \ 'u', 'u', 'y', 'y', \ '', ''] LNightMax = [365, 365, 730, 730, \ 1e4, 1e4, 1e4, 1e4, \ 1e4, 1e4] # WIC try again, this time on the new astro_lsst_01_1004 only if wfdPlane: LFilters = ['', '', '', 'u', 'y'] LNightMax = [365, 730, 1e4, 1e4, 1e4] runNames = ['astro_lsst_01_1004' for i in range (len(LFilters)) ] # WIC 2016-05-01 check correlation if checkCorrKind: LFilters = ['', ''] LNightMax = [365, 365] runNames = ['minion_1016', 'minion_1016'] # Type of correlation used for HA Degen # checkCorrKind = True useSpearmanR = [False, True] if useGRIZ: runNames=['minion_1016','astro_lsst_01_1004', 'minion_1020'] LFilters = ['griz' for iRun in range(len(runNames)) ] #LNightMax = [1e4 for iRun in range(len(runNames)) ] #LNightMax = [730 for iRun in range(len(runNames)) ] LNightMax = [365 for iRun in range(len(runNames)) ] # List of upper limits to parallax and proper motion error. 
For parallax, 3.0 mas is probably good LUpperParallax = [] LUpperPropmotion = [] if CustomPlotLimits: LUpperParallax = [10, 10, 10, 10, \ 10, 10, 40, 40, \ 3.0, 3.0 ] # For proper motion, it's a little tricky to say because the # regular case is so pathological for the field. Try the following: LUpperPropmotion = [40, 40, 5, 20, \ 3.5, 20, 3.5, 20, \ 0.5, 5] if len(runNames) < 2: LUpperPropmotion = [100 for i in range(len(runNames))] print "runAstrom.go INFO - will run the following:" for iSho in range(len(runNames)): sFilThis = '' # print iSho, len(LFilters) if iSho <= len(LFilters): sFilThis = sqlFromFilterString(LFilters[iSho]) print "%i: %-12s, %1s, %i, sqlFilter -- %s" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho], sFilThis) print "===========================" print "mag max = %.2f" % (rmag) print "---------------------------" # print runNames # if not DoRun: # print "Set DoRun=True to actually run this." # print len(LFilters), len(runNames), len(LFilters) == len(runNames) # return #'kraken_1038', 'kraken_1034', 'ops2_1098'] # nside = 64 slicer = slicers.HealpixSlicer(nside=nside) # Make it so we don't bother with the silly power spectra plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()] # WIC - back up the plotting arguments with a default value plotFuncsPristine = copy.deepcopy(plotFuncs) # WIC - the only way this will make sense to me is if I make a # dictionary of plot arguments. Let's try it... DPlotArgs = {} for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']: DPlotArgs[plotArg] = copy.deepcopy(plotFuncs) if CustomPlotLimits: # Use the same color map for all the metrics for plotMetric in DPlotArgs.keys(): DPlotArgs[plotMetric][0].defaultPlotDict['cmap'] = sCmap # Apply spatial clipping for all but the HADegen, for which we # have other limits... 
for plotMetric in ['parallax', 'propmotion', 'coverage']: DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip # Some limits common to spatial maps and histograms for iPl in range(0,2): DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True # NOT a loop because we might want to separate out the behavior # Standardized range for the histograms for new parallax metrics DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0. DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1. DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1. DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] = 1. # Standardize the sky map for the HAdegen as well. DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0. DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1. DPlotArgs['HAdegen'][0].defaultPlotDict['xMin'] = -1. DPlotArgs['HAdegen'][0].defaultPlotDict['xMax'] = 1. # Standardize at least the lower bound of the histogram in # both the proper motion and parallax errors. Upper limit we # can customize with a loop. DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2 # should not be zero if log scale!! DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0. # WIC - try changing the plot dictionary if not DoRun: plotFuncs[0].defaultPlotDict['logScale'] = True print DPlotArgs['propmotion'][0].defaultPlotDict print DPlotArgs['propmotion'][1].defaultPlotDict return # The old runs have the seeing in finSeeing #seeingCol = 'finSeeing' ### UPDATE THE SEEING COLUMN #seeingCol = 'FWHMeff' ## Moved up to a command-line argument # Use all the observations. Can change if you want a different # time span # sqlconstraint = '' # list of sqlconstraints now used, which gets handled within the loop. # run some summary stats on everything summaryMetrics = [metrics.MedianMetric()] tStart = time.time() # Running one, or the whole lot? 
RunMax = len(runNames) # allow user to set a different number (say, 2) if MaxRuns < RunMax and MaxRuns > 0: RunMax = int(MaxRuns) # the following keyword overrides if RunOne: RunMax = 1 print "Starting runs. RunMax = %i" % (RunMax) for iRun in range(RunMax): run = runNames[iRun][:] # for run in runNames: # Open the OpSim database timeStartIteration = time.time() # Some syntax added to test for existence of the database dbFil = run+'_sqlite.db' if not os.access(dbFil, os.R_OK): print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil) print "runAstrom.go FATAL - skipping run %s" % (run) continue else: deltaT = time.time()-tStart print "runAstrom.go INFO - ##################################" print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \ % (run, nside, deltaT/60.) opsdb = db.OpsimDatabase(run+'_sqlite.db') # Set SQL constraint appropriate for each filter in the # list. If we supplied a list of filters, use it for sqlconstraint = '' ThisFilter = 'ugrizy' if len(LFilters) == len(runNames): # Only change the filter if one was actually supplied! if len(LFilters[iRun]) > 0: ThisFilter = LFilters[iRun] sqlconstraint = sqlFromFilterString(ThisFilter) ### sqlconstraint = 'filter = "%s"' % (ThisFilter) # If nightmax was supplied, use it ThisNightMax = int(nightMax) # copy not view if len(LNightMax) == len(runNames): # Only update nightmax if one was given try: ThisNightMax = int(LNightMax[iRun]) # This might be redundant with the fmt statement below. if len(sqlconstraint) < 1: sqlconstraint = 'night < %i' % (ThisNightMax) else: sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax) except: print "runAstrom.go WARN - run %i problem with NightMax" % (iRun) dumdum = 1. # Set where the output should go - include the filter!! sMag = '%.1f' % (rmag) sMag = sMag.replace(".","p") outDir = './metricEvals/%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag) # Ensure we'll be able to find this later on... 
if CustomPlotLimits: outDir = '%s_lims' % (outDir) # if we are testing the kind of correlation used, include that # in the output here. if checkCorrKind: if useSpearmanR[iRun]: sCorr = 'spearmanR' else: sCorr = 'pearsonR' outDir = '%s_%s' % (outDir, sCorr) # From this point onwards, stuff actually gets run. This is # the place to output what will actually happen next. print "runAstrom.go INFO - about to run:" print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside) print "runAstrom.go INFO - output directory will be %s" % (outDir) if not DoRun: continue # ensure the output directory actually exists... if not os.access(outDir, os.R_OK): print "runAstrom.go INFO - creating output directory %s" % (outDir) os.makedirs(outDir) resultsDb = db.ResultsDb(outDir=outDir) bundleList = [] # WIC - to make this at least somewhat uniform, build the plot # functions including arguments out of our copies above. plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion']) plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax']) plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage']) plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen']) # if using custom plot limits, will want to include the limits # for proper motion and parallax too... programming a bit defensively # here, including an extra check (rather than just the length of the lists # above). 
if CustomPlotLimits: if len(LUpperParallax) == len(runNames): plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun]) if len(LUpperPropmotion) == len(runNames): plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun]) # Configure the metrics metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs = plotFuncsParallax, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsPropmotion, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) metric = calibrationMetrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsCoverage, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) # Now for the HA Degen metric. If testing the type of # correlation, call the metric differently here. Since the # argument to actually do this is only part of my github fork # at the moment, we use a different call. Running with default # arguments (checkCorrKind=False) should then work without # difficulty. 
metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate) if checkCorrKind: metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate, useSpearmanR=useSpearmanR[iRun]) print "TESTING CORRELATION KIND -- useSpearmanR", useSpearmanR[iRun] bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run, # plotFuncs=plotFuncs, \ plotFuncs=plotFuncsHAdegen, \ summaryMetrics=summaryMetrics) bundleList.append(bundle) # Run everything and make plots bundleDict = metricBundles.makeBundlesDictFromList(bundleList) bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb) # try: bgroup.runAll() print "runAstrom.go INFO - bundles took %.2f minutes" \ % ((time.time() - timeStartIteration) / 60.) # except KeyboardInterrupt: # print "runAstrom.go FATAL - keyboard interrupt detected. Halting." # return bgroup.plotAll() print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \ % ((time.time() - timeStartIteration) / 60.) print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)