Example 1
 def testDbCreation(self):
     # Test default sqlite file created (even if outDir doesn't exist)
     resultsdb = db.ResultsDb(outDir=self.outDir)
     self.assertTrue(
         os.path.isfile(os.path.join(self.outDir, 'resultsDb_sqlite.db')))
     resultsdb.close()
     # Test can pick custom name in directory that exists.
     sqlitefilename = os.path.join(self.outDir, 'testDb_sqlite.db')
     resultsdb = db.ResultsDb(database=sqlitefilename)
     self.assertTrue(os.path.isfile(sqlitefilename))
     resultsdb.close()
     # Test that get appropriate exception if directory doesn't exist.
     sqlitefilename = os.path.join(self.outDir + 'test', 'testDb_sqlite.db')
     self.assertRaises(ValueError, db.ResultsDb, database=sqlitefilename)
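
A minimal sketch (directory and file names are assumptions) of the two construction modes this test exercises: passing outDir creates the default resultsDb_sqlite.db inside that directory (creating the directory if needed), while passing database uses an explicit file path whose parent directory must already exist.

import os
import lsst.sims.maf.db as db

outDir = 'maf_out'                    # hypothetical output directory
rdb = db.ResultsDb(outDir=outDir)     # creates outDir/resultsDb_sqlite.db
rdb.close()

custom = os.path.join(outDir, 'my_results.db')
rdb = db.ResultsDb(database=custom)   # explicit filename; parent must exist
rdb.close()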
Example 2
def runGlance(dbfile, outDir='Glance', runName='runname'):
    conn = db.SimpleDatabase(dbfile,
                             defaultTable='SummaryAllProps',
                             defaultdbTables={
                                 'SummaryAllProps':
                                 ['SummaryAllProps', 'observationId']
                             })
    colmap = {
        'ra': 'RA',
        'dec': 'dec',
        'mjd': 'mjd',
        'exptime': 'exptime',
        'visittime': 'exptime',
        'alt': 'alt',
        'az': 'az',
        'filter': 'filter',
        'fiveSigmaDepth': 'fivesigmadepth',
        'night': 'night',
        'slewtime': 'slewtime',
        'seeingGeom': 'FWHM_geometric'
    }

    gb = glanceBundle(colmap_dict=colmap)
    resultsDb = db.ResultsDb(outDir=outDir)

    group = metricBundles.MetricBundleGroup(gb,
                                            conn,
                                            outDir=outDir,
                                            resultsDb=resultsDb,
                                            runName=runName)

    group.runAll()
    group.plotAll()
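
A hypothetical invocation of the helper above (the database filename is an assumption):

runGlance('baseline_survey.db', outDir='Glance', runName='baseline_survey')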
Example 3
def get_cadences():
    outDir = 'LightCurve'
    dbFile = '/Users/ruthangus/Downloads/minion_1016_sqlite.db'
    opsimdb = utils.connectOpsimDb(dbFile)
    resultsDb = db.ResultsDb(outDir=outDir)

    filters = ['u', 'g', 'r', 'i', 'z', 'y']

    # SNR limit (Don't use points below this limit)
    snrLimit = 5.
    # Demand this many points above SNR limit before plotting LC
    nPtsLimit = 6

    # Read RA, Dec coordinates (in degrees) from file and convert to radians.
    ls, bs, ras_deg, decs_deg = np.genfromtxt("coords.txt", skip_header=1).T
    ras_rad = np.radians(ras_deg)
    decs_rad = np.radians(decs_deg)

    for i, ra in enumerate(ras_rad):
        print("b = ", bs[i])
        times, five_sig_depths = get_cadence(ra, decs_rad[i], bs[i], snrLimit,
                                             nPtsLimit, filters, outDir,
                                             opsimdb, resultsDb)
        for j in range(len(filters)):
            data = np.vstack(
                (np.array(times[j]), np.array(five_sig_depths[j])))
            np.savetxt("l45b{0}_{1}_cadence.txt".format(bs[i], filters[j]),
                       data.T)
Example 4
    def __getOpSimMjd(self, opsim, ra, dec, fil):
        colmn = 'observationStartMJD'
        opsdb = db.OpsimDatabase(opsim)

        # Directory where temporary files are stored (TODO: eliminate this)
        outDir = 'TmpDir'
        resultsDb = db.ResultsDb(outDir=outDir)

        metric = metrics.PassMetric(cols=[colmn, 'fiveSigmaDepth', 'filter'])
        slicer = slicers.UserPointsSlicer(ra=ra, dec=dec)
        sqlconstraint = 'filter = \'' + fil + '\''

        bundle = mb.MetricBundle(metric, slicer, sqlconstraint, runName='name')
        bgroup = mb.MetricBundleGroup({0: bundle},
                                      opsdb,
                                      outDir=outDir,
                                      resultsDb=resultsDb)
        bgroup.runAll()

        filters = np.unique(bundle.metricValues[0]['filter'])
        mv = bundle.metricValues[0]

        # Get dates
        mjd = mv[colmn]
        mjd = np.sort(mjd)
        print('Num of visits ' + str(len(mjd)) + ' ' + opsim)
        return mjd
Example 5
 def testAddData(self):
     tempdir = tempfile.mkdtemp(prefix='resDb')
     resultsDb = db.ResultsDb(outDir=tempdir)
     # Add metric.
     metricId = resultsDb.updateMetric(self.metricName, self.slicerName,
                                       self.runName, self.constraint,
                                       self.metadata, self.metricDataFile)
     # Try to re-add metric (should get back same metric id as previous, with no add).
     metricId2 = resultsDb.updateMetric(self.metricName, self.slicerName,
                                        self.runName, self.constraint,
                                        self.metadata, self.metricDataFile)
     self.assertEqual(metricId, metricId2)
     run1 = resultsDb.session.query(db.MetricRow).filter_by(metricId=metricId).all()
     self.assertEqual(len(run1), 1)
     # Add plot.
     resultsDb.updatePlot(metricId, self.plotType, self.plotName)
     # Add normal summary statistics.
     resultsDb.updateSummaryStat(metricId, self.summaryStatName1, self.summaryStatValue1)
     resultsDb.updateSummaryStat(metricId, self.summaryStatName2, self.summaryStatValue2)
     # Add something like tableFrac summary statistic.
     resultsDb.updateSummaryStat(metricId, self.summaryStatName3, self.summaryStatValue3)
     # Test get warning when try to add a non-conforming summary stat (not 'name' & 'value' cols).
     teststat = np.empty(10, dtype=[('col', '|S12'), ('value', float)])
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         resultsDb.updateSummaryStat(metricId, 'testfail', teststat)
         self.assertTrue("not save" in str(w[-1].message))
     # Test get warning when try to add a string (non-conforming) summary stat.
     teststat = 'teststring'
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         resultsDb.updateSummaryStat(metricId, 'testfail', teststat)
         self.assertTrue("not save" in str(w[-1].message))
     shutil.rmtree(tempdir)
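
For contrast with the failing cases above, a sketch (names and values are illustrative) of a multi-valued summary statistic in the format the test implies ResultsDb accepts: a numpy structured array with 'name' and 'value' columns.

import numpy as np

goodstat = np.empty(3, dtype=[('name', '|S12'), ('value', float)])
goodstat['name'] = [b'f_u', b'f_g', b'f_r']   # illustrative stat names
goodstat['value'] = [0.1, 0.2, 0.3]           # illustrative stat values
# resultsDb.updateSummaryStat(metricId, 'TableFraction', goodstat)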
Example 6
    def testOut(self):
        """
        Check that the metric bundle can generate the expected output
        """
        slicer = slicers.HealpixSlicer(nside=8)
        metric = metrics.MeanMetric(col='airmass')
        sql = 'filter="r"'

        metricB = metricBundles.MetricBundle(metric, slicer, sql)
        filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')

        database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
        opsdb = db.OpsimDatabase(database=database)
        resultsDb = db.ResultsDb(outDir=self.outDir)

        bgroup = metricBundles.MetricBundleGroup({0: metricB},
                                                 opsdb,
                                                 outDir=self.outDir,
                                                 resultsDb=resultsDb)
        bgroup.runAll()
        bgroup.plotAll()
        bgroup.writeAll()

        outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
        outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
        outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))

        # By default, make 3 plots for healpix
        assert (len(outThumbs) == 3)
        assert (len(outPdf) == 3)
        assert (len(outNpz) == 1)
Example 7
def connect_dbs(dbDir, outDir, dbRuns=None):
    """
    Initiate database objects to all opSim databases in the provided directory.
    Returns a dictionary consisting all database connections and a dictionary
    holding the resultsDb objects.

    Args:
        dbDir(str): The path to the dabase directory.
        outDir(str): The path to the result database directory.

    Returns:
        opSimDbs(dict): A dictionary containing the OpsimDatabase objects for
            opsim databases in the provided directory, keys are the run names.
        resultDbs(str): A dictionary containing the ResultsDb objects for opsim
            databases in the provided directory, keys are the run names.
    """
    opSimDbs = {}
    resultDbs = {}

    if dbRuns is None:
        dbDir = os.path.abspath(dbDir)
        db_list = glob.glob(dbDir + '/*.db')
    else:
        db_list = [os.path.join(dbDir, dbRun + '.db') for dbRun in dbRuns]

    for dbPath in db_list:
        dbName = os.path.basename(dbPath)
        opSimDbs[os.path.splitext(dbName)[0]] = db.OpsimDatabase(dbPath)
        resultDbs[os.path.splitext(dbName)[0]] = \
            db.ResultsDb(outDir=outDir,
                         database=os.path.splitext(dbName)[0]+'_result.db')
    return (opSimDbs, resultDbs)
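
A hypothetical call, assuming ./runs holds the opsim .db files and ./results receives the per-run result databases:

opSimDbs, resultDbs = connect_dbs('./runs', './results')
for runName in opSimDbs:
    print(runName, len(resultDbs[runName].getAllMetricIds()))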
Example 8
def runChips(useCamera=False):
    import numpy as np
    import lsst.sims.maf.slicers as slicers
    import lsst.sims.maf.metrics as metrics
    import lsst.sims.maf.metricBundles as metricBundles
    import lsst.sims.maf.db as db
    from lsst.sims.maf.plots import PlotHandler
    import matplotlib.pylab as plt
    import healpy as hp


    print('Camera setting = ', useCamera)

    database = 'enigma_1189_sqlite.db'
    sqlWhere = 'filter = "r" and night < 800 and fieldRA < %f and fieldDec > %f and fieldDec < 0' % (np.radians(15), np.radians(-15))
    opsdb = db.OpsimDatabase(database)
    outDir = 'Camera'
    resultsDb = db.ResultsDb(outDir=outDir)

    nside = 512
    tag = 'F'
    if useCamera:
        tag = 'T'
    metric = metrics.CountMetric('expMJD', metricName='chipgap_%s' % tag)

    slicer = slicers.HealpixSlicer(nside=nside, useCamera=useCamera)
    bundle1 = metricBundles.MetricBundle(metric, slicer, sqlWhere)

    bg = metricBundles.MetricBundleGroup({0: bundle1}, opsdb, outDir=outDir, resultsDb=resultsDb)
    bg.runAll()
    hp.gnomview(bundle1.metricValues, xsize=800, ysize=800, rot=(7, -7, 0), unit='Count', min=1)
    plt.savefig(outDir + '/fig' + tag + '.png')
Example 9
def compute_metric(params):
    """Function to execute the metric calculation when code is called from
    the commandline"""

    obsdb = db.OpsimDatabase('../../tutorials/baseline2018a.db')
    outputDir = '/home/docmaf/'
    resultsDb = db.ResultsDb(outDir=outputDir)

    (propids, proptags) = obsdb.fetchPropInfo()
    surveyWhere = obsdb.createSQLWhere(params['survey'], proptags)

    obs_params = {
        'filters': params['filters'],
        'cadence': params['cadence'],
        'start_date': params['start_date'],
        'end_date': params['end_date']
    }

    metric = CadenceOverVisibilityWindowMetric(**obs_params)
    slicer = slicers.HealpixSlicer(nside=64)
    sqlconstraint = surveyWhere
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint)

    bgroup = metricBundles.MetricBundleGroup({0: bundle},
                                             obsdb,
                                             outDir='newmetric_test',
                                             resultsDb=resultsDb)
    bgroup.runAll()
Example 10
def load_and_run():
    dbFile = 'baseline_nexp2_v1.7_10yrs.db'
    opsimdb = db.OpsimDatabase(dbFile)
    runName = dbFile.replace('.db', '')

    nside = 64
    slicer = slicers.HealpixSlicer(nside=nside)

    metric = SNNSNMetric(verbose=False)  #, zlim_coeff=0.98)

    bundleList = []

    #sql = ''
    sql = '(note = "%s")' % ('DD:COSMOS')

    bundleList.append(
        metricBundles.MetricBundle(metric, slicer, sql, runName=runName))

    outDir = 'temp'
    resultsDb = db.ResultsDb(outDir=outDir)
    bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
    bgroup = metricBundles.MetricBundleGroup(bundleDict,
                                             opsimdb,
                                             outDir=outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
Example 11
def run(bdict, opsdb, colmap, args):
    resultsDb = db.ResultsDb(outDir=args.outDir)
    group = mb.MetricBundleGroup(bdict, opsdb, outDir=args.outDir, resultsDb=resultsDb)
    group.runAll()
    group.plotAll()
    resultsDb.close()
    mafUtils.writeConfigs(opsdb, args.outDir)
Example 12
def run_glance(outDir, dbname, bd):
    # bd: dictionary of MetricBundles to run.
    conn = db.Database(dbname, defaultTable='observations')
    resultsDb = db.ResultsDb(outDir=outDir)
    mbg = MetricBundleGroup(bd, conn, outDir=outDir, resultsDb=resultsDb)
    mbg.runAll()
    mbg.plotAll()
    conn.close()
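
Since run_glance expects the bundle dictionary from the caller, a minimal sketch of building one (the metric, slicer, and constraint choices here are assumptions):

import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as metricBundles

bundle = metricBundles.MetricBundle(metrics.MeanMetric(col='airmass'),
                                    slicers.HealpixSlicer(nside=16),
                                    'filter="r"')
bd = {'airmassMean': bundle}
run_glance('Glance', 'baseline.db', bd)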
Example 13
def replot(bdict, opsdb, colmap, args):
    resultsDb = db.ResultsDb(outDir=args.outDir)
    group = mb.MetricBundleGroup(bdict,
                                 opsdb,
                                 outDir=args.outDir,
                                 resultsDb=resultsDb)
    group.readAll()
    group.plotAll()
    resultsDb.close()
Example 14
 def testDbCreation(self):
     # Test default sqlite file created.
     tempdir = tempfile.mkdtemp(prefix='resDb')
     resultsdb = db.ResultsDb(outDir=tempdir)
     self.assertTrue(os.path.isfile(os.path.join(tempdir, 'resultsDb_sqlite.db')))
     resultsdb.close()
     # Test that get appropriate exception if directory doesn't exist.
     sqlitefilename = os.path.join(tempdir + 'test', 'testDb_sqlite.db')
     self.assertRaises(ValueError, db.ResultsDb, database=sqlitefilename)
     shutil.rmtree(tempdir)
Example 15
def replotSlew(opsdb, colmap, args):
    print('Only replots slew basics batch.')
    bdict = batches.slewBasics(colmap, args.runName)
    resultsDb = db.ResultsDb(outDir=args.outDir)
    group = mb.MetricBundleGroup(bdict,
                                 opsdb,
                                 outDir=args.outDir,
                                 resultsDb=resultsDb)
    group.readAll()
    group.plotAll()
    resultsDb.close()
Example 16
def run(bdict, opsdb, colmap, args, save=True):
    resultsDb = db.ResultsDb(outDir=args.outDir)
    group = mb.MetricBundleGroup(bdict,
                                 opsdb,
                                 outDir=args.outDir,
                                 resultsDb=resultsDb,
                                 saveEarly=False)
    if save:
        group.runAll()
        group.plotAll()
    else:
        group.runAll(clearMemory=True, plotNow=True)
    resultsDb.close()
    mafUtils.writeConfigs(opsdb, args.outDir)
Example 17
def runGlance(dbfile, outDir='Glance', runName='runname', camera='LSST'):
    conn = db.Database(dbfile, defaultTable='observations')
    colmap = {'ra': 'RA', 'dec': 'dec', 'mjd': 'mjd',
              'exptime': 'exptime', 'visittime': 'exptime', 'alt': 'alt',
              'az': 'az', 'filter': 'filter', 'fiveSigmaDepth': 'fivesigmadepth',
              'night': 'night', 'slewtime': 'slewtime', 'seeingGeom': 'FWHM_geometric',
              'rotSkyPos': 'rotSkyPos', 'raDecDeg': True, 'slewdist': None,
              'note': 'note'}

    gb = glanceBatch(colmap=colmap, slicer_camera=camera)
    resultsDb = db.ResultsDb(outDir=outDir)

    group = metricBundles.MetricBundleGroup(gb, conn, outDir=outDir, resultsDb=resultsDb)

    group.runAll()
    group.plotAll()
Example 18
def runSlew(opsdb, colmap, args):
    resultsDb = db.ResultsDb(outDir=args.outDir)
    bdict = batches.slewBasics(colmap, args.runName)
    dbTable = None
    group = mb.MetricBundleGroup(bdict,
                                 opsdb,
                                 outDir=args.outDir,
                                 resultsDb=resultsDb,
                                 dbTable=dbTable)
    group.runAll()
    group.plotAll()
    if 'slewStatesTable' in colmap:
        bdict = batches.slewAngles(colmap, args.runName)
        dbTable = colmap['slewStatesTable']
        group = mb.MetricBundleGroup(bdict,
                                     opsdb,
                                     outDir=args.outDir,
                                     resultsDb=resultsDb,
                                     dbTable=dbTable)
        group.runAll()
        group.plotAll()
    if 'slewSpeedsTable' in colmap:
        bdict = batches.slewSpeeds(colmap, args.runName)
        dbTable = colmap['slewSpeedsTable']
        group = mb.MetricBundleGroup(bdict,
                                     opsdb,
                                     outDir=args.outDir,
                                     resultsDb=resultsDb,
                                     dbTable=dbTable)
        group.runAll()
        group.plotAll()
    if 'slewActivitiesTable' in colmap:
        nslews = opsdb.fetchTotalSlewN()
        bdict = batches.slewActivities(colmap, args.runName, totalSlewN=nslews)
        dbTable = colmap['slewActivitiesTable']
        group = mb.MetricBundleGroup(bdict,
                                     opsdb,
                                     outDir=args.outDir,
                                     resultsDb=resultsDb,
                                     dbTable=dbTable)
        group.runAll()
        group.plotAll()
    resultsDb.close()
    mafUtils.writeConfigs(opsdb, args.outDir)
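
runSlew dispatches on optional slew-table keys in the column map; a sketch of those keys (the table names shown are assumptions about the opsim schema, not values from the source):

colmap = batches.ColMapDict()                     # standard column map
colmap['slewStatesTable'] = 'SlewState'           # assumed table name
colmap['slewSpeedsTable'] = 'SlewMaxSpeeds'       # assumed table name
colmap['slewActivitiesTable'] = 'SlewActivities'  # assumed table name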
Example 19
    def testOut(self):
        """
        Check that the metric bundle can generate the expected output
        """
        nside = 8
        slicer = slicers.HealpixSlicer(nside=nside)
        metric = metrics.MeanMetric(col='airmass')
        sql = 'filter="r"'
        stacker1 = stackers.RandomDitherFieldPerVisitStacker()
        stacker2 = stackers.GalacticStacker()
        map1 = maps.GalCoordsMap()
        map2 = maps.StellarDensityMap()

        metricB = metricBundles.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             stackerList=[stacker1, stacker2],
                                             mapsList=[map1, map2])
        database = os.path.join(getPackageDir('sims_data'), 'OpSimData',
                                'astro-lsst-01_2014.db')

        opsdb = db.OpsimDatabaseV4(database=database)
        resultsDb = db.ResultsDb(outDir=self.outDir)

        bgroup = metricBundles.MetricBundleGroup({0: metricB},
                                                 opsdb,
                                                 outDir=self.outDir,
                                                 resultsDb=resultsDb)
        bgroup.runAll()
        bgroup.plotAll()
        bgroup.writeAll()

        opsdb.close()

        outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
        outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
        outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))

        # By default, make 3 plots for healpix
        assert (len(outThumbs) == 3)
        assert (len(outPdf) == 3)
        assert (len(outNpz) == 1)
Example 20
    def testOut(self):
        """
        Check that the metric bundle can generate the expected output
        """
        nside = 8
        slicer = slicers.HealpixSlicer(nside=nside)
        metric = metrics.MeanMetric(col='airmass')
        sql = 'filter="r"'
        stacker1 = stackers.RandomDitherFieldPerVisitStacker()
        stacker2 = stackers.GalacticStacker()
        map1 = maps.GalCoordsMap()
        map2 = maps.StellarDensityMap()

        metricB = metricBundles.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             stackerList=[stacker1, stacker2])
        filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')

        database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
        opsdb = db.OpsimDatabase(database=database)
        resultsDb = db.ResultsDb(outDir=self.outDir)

        bgroup = metricBundles.MetricBundleGroup({0: metricB},
                                                 opsdb,
                                                 outDir=self.outDir,
                                                 resultsDb=resultsDb)
        bgroup.runAll()
        bgroup.plotAll()
        bgroup.writeAll()

        outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
        outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
        outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))

        # By default, make 3 plots for healpix
        assert (len(outThumbs) == 3)
        assert (len(outPdf) == 3)
        assert (len(outNpz) == 1)
Example 21
 def setUp(self):
     self.outDir = 'Out'
     self.metricName = 'Count ExpMJD'
     self.slicerName = 'OneDSlicer'
     self.runName = 'fakeopsim'
     self.constraint = ''
     self.metadata = 'Dithered'
     self.metricDataFile = 'testmetricdatafile.npz'
     self.plotType = 'BinnedData'
     self.plotName = 'testmetricplot_BinnedData.png'
     self.summaryStatName1 = 'Mean'
     self.summaryStatValue1 = 20
     self.summaryStatName2 = 'Median'
     self.summaryStatValue2 = 18
     self.tempdir = tempfile.mkdtemp(prefix='resDb')
     self.resultsDb = db.ResultsDb(self.tempdir)
     self.metricId = self.resultsDb.updateMetric(self.metricName, self.slicerName,
                                                 self.runName, self.constraint,
                                                 self.metadata, self.metricDataFile)
     self.resultsDb.updatePlot(self.metricId, self.plotType, self.plotName)
     self.resultsDb.updateSummaryStat(self.metricId, self.summaryStatName1, self.summaryStatValue1)
     self.resultsDb.updateSummaryStat(self.metricId, self.summaryStatName2, self.summaryStatValue2)
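
A matching tearDown sketch (assumed, not shown in the source; assumes the test module imports shutil, as in Example 5) releasing the resources created in setUp:

 def tearDown(self):
     self.resultsDb.close()
     shutil.rmtree(self.tempdir)  # remove the temporary directory from setUp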
Example 22
def getResultsDbs(resultDbPath):
    """Create a dictionary of resultDb from resultDb files

    Args:
        resultDbPath(str): Path to the directory storing the result databases
            generated by MAF.

    Returns:
        resultDbs(dict): A dictionary containing the ResultsDb objects
            reconstructed from result databases in the provided directory.
    """

    resultDbs = {}
    resultDbList = glob.glob(os.path.join(resultDbPath, '*_result.db'))
    for resultDb in resultDbList:
        runName = os.path.basename(resultDb).rsplit('_', 1)[0]
        resultDb = db.ResultsDb(database=resultDb)

        # Don't add an empty results.db file.
        if len(resultDb.getAllMetricIds()) > 0:
            resultDbs[runName] = resultDb

    return resultDbs
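
A hypothetical usage, assuming ./results holds *_result.db files written by MAF:

resultDbs = getResultsDbs('./results')
for runName, rdb in resultDbs.items():
    print(runName, len(rdb.getAllMetricIds()))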
Example 23
        " Default=fieldRA.")
    parser.add_argument(
        "--latCol",
        type=str,
        default='fieldDec',
        help="Column to use for Dec values (can be a stacker dither column)." +
        " Default=fieldDec.")
    parser.add_argument('--night', type=int, default=1)

    parser.set_defaults()
    args, extras = parser.parse_known_args()

    bundleDict = makeBundleList(args.dbFile,
                                nside=args.nside,
                                lonCol=args.lonCol,
                                latCol=args.latCol,
                                night=args.night)

    # Set up / connect to resultsDb.
    resultsDb = db.ResultsDb(outDir=args.outDir)
    # Connect to opsimdb.
    opsdb = db.OpsimDatabaseV3(args.dbFile)

    # Set up metricBundleGroup.
    group = metricBundles.MetricBundleGroup(bundleDict,
                                            opsdb,
                                            outDir=args.outDir,
                                            resultsDb=resultsDb)
    group.runAll()
    group.plotAll()
Example 24
                        help="H value at which to calculate cumulative/differential completeness, etc."
                             "Default (None) will be set to plotDict value or median of H range.")
    parser.add_argument("--nYearsMax", type=int, default=10,
                        help="Maximum number of years out to which to evaluate completeness."
                             "Default 10.")
    parser.add_argument("--startTime", type=float, default=59853,
                        help="Time at start of survey (to set time for summary metrics).")
    args = parser.parse_args()

    # Default parameters for metric setup.
    stepsize = 365/6.
    times = np.arange(0, args.nYearsMax*365 + stepsize/2, stepsize)
    times += args.startTime

    # Create a results Db.
    resultsDb = db.ResultsDb(outDir=args.workDir)

    # Just read in all metrics in the (joint or single) directory, then run completeness and fraction
    # summaries, using the methods in the batches.
    if args.metadata is None:
        matchstring = os.path.join(args.workDir, '*MOOB.npz')
    else:
        matchstring = os.path.join(args.workDir, f'*{args.metadata}*MOOB.npz')
    metricfiles = glob.glob(matchstring)
    metricNames = []
    for m in metricfiles:
        mname = os.path.split(m)[-1].replace('_MOOB.npz', '')
        metricNames.append(mname)

    bdict = {}
    for mName, mFile in zip(metricNames, metricfiles):
Example 25
    if args.db is None:
        if os.path.isfile('trackingDb_sqlite.db'):
            os.remove('trackingDb_sqlite.db')
        db_files = glob.glob('*.db')
    else:
        db_files = [args.db]
    run_names = [
        os.path.basename(name).replace('.db', '') for name in db_files
    ]

    for filename, name in zip(db_files, run_names):
        if os.path.isdir(name + '_glance'):
            shutil.rmtree(name + '_glance')
        opsdb = db.OpsimDatabaseV4(filename)
        colmap = batches.ColMapDict()

        bdict = {}
        bdict.update(batches.glanceBatch(colmap, name))
        bdict.update(batches.fOBatch(colmap, name))
        resultsDb = db.ResultsDb(outDir=name + '_glance')
        group = mb.MetricBundleGroup(bdict,
                                     opsdb,
                                     outDir=name + '_glance',
                                     resultsDb=resultsDb,
                                     saveEarly=False)
        group.runAll(clearMemory=True, plotNow=True)
        resultsDb.close()
        opsdb.close()
        db.addRunToDatabase(name + '_glance', 'trackingDb_sqlite.db', None,
                            name, '', '', name + '.db')
Example 26
def go(nside=64, rmag=21., SedTemplate='flat', DoRun=False, LFilters = [], \
           LNightMax=[], nightMax=1e4, \
           CustomPlotLimits=True, \
           RunOne=False, MaxRuns=1e3, \
           SpatialClip=95., \
           seeingCol='FWHMeff', \
           sCmap='cubehelix_r', \
           checkCorrKind=False, \
           wfdPlane=True, \
           useGRIZ=False):

    # Go to the directory where the sqlite databases are held...

    # cd /Users/clarkson/Data/LSST/OpSimRuns/opsim20160411


    # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits
    # Break the specifications across lines to make subdivision easier
    
    # Subsets by time first, then by filter, finally the whole shebang

    # 2016-04-23 - replaced enigma_1189 --> minion_1016
    # 2016-04-23 - replaced ops2_1092 --> minion_1020
    
    
    # (Yes the inversion of the first two is deliberate.)
    runNames = ['minion_1016', 'minion_1020', 'minion_1020', 'minion_1016', \
                    'minion_1020', 'minion_1016', 'minion_1020', 'minion_1016', \
                    'minion_1020', 'minion_1016']
    LFilters = ['', '', '', '', \
                    'u', 'u', 'y', 'y', \
                    '', '']  
    LNightMax = [365, 365, 730, 730, \
                     1e4, 1e4, 1e4, 1e4, \
                     1e4, 1e4]

    # WIC try again, this time on the new astro_lsst_01_1004 only
    if wfdPlane:
        LFilters = ['', '', '', 'u', 'y']  
        LNightMax = [365, 730, 1e4, 1e4, 1e4]
        runNames = ['astro_lsst_01_1004' for i in range (len(LFilters)) ]

    # WIC 2016-05-01 check correlation
    if checkCorrKind:
        LFilters = ['', '']
        LNightMax = [365, 365]
        runNames = ['minion_1016', 'minion_1016']

        # Type of correlation used for HA Degen 
        # checkCorrKind = True
        useSpearmanR = [False, True]
    
    if useGRIZ:
        runNames=['minion_1016','astro_lsst_01_1004', 'minion_1020']
        LFilters = ['griz' for iRun in range(len(runNames)) ]
        #LNightMax = [1e4 for iRun in range(len(runNames)) ]
        #LNightMax = [730 for iRun in range(len(runNames)) ]
        LNightMax = [365 for iRun in range(len(runNames)) ]


    # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good
    LUpperParallax = []
    LUpperPropmotion = []


    if CustomPlotLimits:
    
        LUpperParallax = [10, 10, 10, 10, \
                              10, 10, 40, 40, \
                              3.0, 3.0 ]

    
        # For proper motion, it's a little tricky to say because the
        # regular case is so pathological for the field. Try the following:
        LUpperPropmotion = [40, 40, 5, 20, \
                                3.5, 20, 3.5, 20, \
                                0.5, 5]

        if len(runNames) < 2:
            LUpperPropmotion = [100 for i in range(len(runNames))]

    print "runAstrom.go INFO - will run the following:"
    for iSho in range(len(runNames)):
        sFilThis = ''
        # print iSho, len(LFilters)
        if iSho <= len(LFilters):
            sFilThis = sqlFromFilterString(LFilters[iSho])

        print "%i: %-12s, %1s, %i, sqlFilter -- %s" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho], sFilThis)
    print "==========================="

    print "mag max = %.2f" % (rmag)
    print "---------------------------"

#    print runNames
#    if not DoRun:
#        print "Set DoRun=True to actually run this."
#        print len(LFilters), len(runNames), len(LFilters) == len(runNames)
#        return

#'kraken_1038', 'kraken_1034', 'ops2_1098']


    # nside = 64

    slicer = slicers.HealpixSlicer(nside=nside)

    # Make it so we don't bother with the silly power spectra
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # WIC - back up the plotting arguments with a default value
    plotFuncsPristine = copy.deepcopy(plotFuncs)

    # WIC - the only way this will make sense to me is if I make a
    # dictionary of plot arguments. Let's try it...
    DPlotArgs = {}
    for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']:
        DPlotArgs[plotArg] = copy.deepcopy(plotFuncs)

    if CustomPlotLimits:

        # Use the same color map for all the metrics
        for plotMetric in DPlotArgs.keys():
            DPlotArgs[plotMetric][0].defaultPlotDict['cmap'] = sCmap


        # Apply spatial clipping for all but the HADegen, for which we
        # have other limits...
        for plotMetric in ['parallax', 'propmotion', 'coverage']:
            DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip

        # Some limits common to spatial maps and histograms
        for iPl in range(0,2):
            DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True

        # NOT a loop because we might want to separate out the behavior

        # Standardized range for the histograms for new parallax metrics
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] =  1.
            
        # Standardize the sky map for the HAdegen as well.
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][0].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][0].defaultPlotDict['xMax'] =  1.


        # Standardize at least the lower bound of the histogram in
        # both the proper motion and parallax errors. Upper limit we
        # can customize with a loop.
        DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2  # should not be zero if log scale!!
        DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0.


    # WIC - try changing the plot dictionary

    if not DoRun:
        plotFuncs[0].defaultPlotDict['logScale'] = True
        print(DPlotArgs['propmotion'][0].defaultPlotDict)
        print(DPlotArgs['propmotion'][1].defaultPlotDict)
        
        return

    # The old runs have the seeing in finSeeing
    #seeingCol = 'finSeeing'

    ### UPDATE THE SEEING COLUMN
    #seeingCol = 'FWHMeff'   ## Moved up to a command-line argument


    # Use all the observations. Can change if you want a different
    # time span
    # sqlconstraint = ''

    # list of sqlconstraints now used, which gets handled within the loop.

    # run some summary stats on everything
    summaryMetrics = [metrics.MedianMetric()]

    tStart = time.time()

    # Running one, or the whole lot?
    RunMax = len(runNames)

    # allow user to set a different number (say, 2)
    if MaxRuns < RunMax and MaxRuns > 0:
        RunMax = int(MaxRuns)

    # the following keyword overrides
    if RunOne:
        RunMax = 1

    print "Starting runs. RunMax = %i" % (RunMax)

    for iRun in range(RunMax):
        run = runNames[iRun][:]

    # for run in runNames:
        # Open the OpSim database
        timeStartIteration = time.time()

        # Some syntax added to test for existence of the database
        dbFil = run+'_sqlite.db'
        if not os.access(dbFil, os.R_OK):
            print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil)
            print "runAstrom.go FATAL - skipping run %s" % (run)
            continue
    
        else:
            deltaT = time.time()-tStart
            print "runAstrom.go INFO - ##################################"
            print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \
                % (run, nside, deltaT/60.)

        opsdb = db.OpsimDatabase(run+'_sqlite.db')

        # Set SQL constraint appropriate for each filter in the
        # list. If we supplied a list of filters, use it for 
        sqlconstraint = ''
        ThisFilter = 'ugrizy'
        if len(LFilters) == len(runNames):

            # Only change the filter if one was actually supplied!
            if len(LFilters[iRun]) > 0:
                ThisFilter = LFilters[iRun]

                sqlconstraint = sqlFromFilterString(ThisFilter)

###                sqlconstraint = 'filter = "%s"' % (ThisFilter)


        # If nightmax was supplied, use it 
        ThisNightMax = int(nightMax)  # copy not view
        if len(LNightMax) == len(runNames):

            # Only update nightmax if one was given
            try:
                ThisNightMax = int(LNightMax[iRun])  # This might be redundant with the fmt statement below.
                if len(sqlconstraint) < 1:
                    sqlconstraint = 'night < %i' % (ThisNightMax)
                else:
                    sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax)
            except:
                print "runAstrom.go WARN - run %i problem with NightMax" % (iRun)
                dumdum = 1.

        # Set where the output should go - include the filter!! 
        sMag = '%.1f' % (rmag)
        sMag = sMag.replace(".","p")
        outDir = './metricEvals/%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag)

        # Ensure we'll be able to find this later on...
        if CustomPlotLimits:
            outDir = '%s_lims' % (outDir)

        # if we are testing the kind of correlation used, include that
        # in the output here.
        if checkCorrKind:
            if useSpearmanR[iRun]:
                sCorr = 'spearmanR'
            else:
                sCorr = 'pearsonR'
        
            outDir = '%s_%s' % (outDir, sCorr)

        # From this point onwards, stuff actually gets run. This is
        # the place to output what will actually happen next.
        print "runAstrom.go INFO - about to run:"
        print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside)
        print "runAstrom.go INFO - output directory will be %s" % (outDir)
        if not DoRun:
            continue

        # ensure the output directory actually exists...
        if not os.access(outDir, os.R_OK):
            print "runAstrom.go INFO - creating output directory %s" % (outDir)
            os.makedirs(outDir)


        resultsDb = db.ResultsDb(outDir=outDir)
        bundleList = []

        # WIC - to make this at least somewhat uniform, build the plot
        # functions including arguments out of our copies above.
        plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion'])
        plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax'])
        plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage'])
        plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen'])

        # if using custom plot limits, will want to include the limits
        # for proper motion and parallax too... programming a bit defensively 
        # here, including an extra check (rather than just the length of the lists 
        # above). 
        if CustomPlotLimits:
            if len(LUpperParallax) == len(runNames):
                plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun])

            if len(LUpperPropmotion) == len(runNames):
                plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun])

        # Configure the metrics
        metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs = plotFuncsParallax, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsPropmotion, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = calibrationMetrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsCoverage, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Now for the HA Degen metric. If testing the type of
        # correlation, call the metric differently here. Since the
        # argument to actually do this is only part of my github fork
        # at the moment, we use a different call. Running with default
        # arguments (checkCorrKind=False) should then work without
        # difficulty.
        metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        if checkCorrKind:
            metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate, useSpearmanR=useSpearmanR[iRun])
            print "TESTING CORRELATION KIND -- useSpearmanR", useSpearmanR[iRun]
            

        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsHAdegen, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Run everything and make plots
        bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
        bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
#        try:
        bgroup.runAll()

        print "runAstrom.go INFO - bundles took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)

#        except KeyboardInterrupt:
#            print "runAstrom.go FATAL - keyboard interrupt detected. Halting."
#            return
        bgroup.plotAll()

        print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)
        

    print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
Example 27
def coaddM5Analysis(path,
                    dbfile,
                    runName,
                    slair=False,
                    WFDandDDFs=False,
                    noDithOnly=False,
                    bestDithOnly=False,
                    someDithOnly=False,
                    specifiedDith=None,
                    nside=128,
                    filterBand='r',
                    includeDustExtinction=False,
                    saveunMaskedCoaddData=False,
                    pixelRadiusForMasking=5,
                    cutOffYear=None,
                    plotSkymap=True,
                    plotCartview=True,
                    unmaskedColorMin=None,
                    unmaskedColorMax=None,
                    maskedColorMin=None,
                    maskedColorMax=None,
                    nTicks=5,
                    plotPowerSpectrum=True,
                    showPlots=True,
                    saveFigs=True,
                    almAnalysis=True,
                    raRange=[-50, 50],
                    decRange=[-65, 5],
                    saveMaskedCoaddData=True):
    """

    Analyze the artifacts induced in the coadded 5sigma depth due to imperfect observing strategy.
      - Creates an output directory for subdirectories containing the specified things to save.
      - Creates, shows, and saves comparison plots.
      - Returns the metricBundle object containing the calculated coadded depth, and the output directory name.

    Required Parameters
    -------------------
      * path: str: path to the main directory where output directory is to be saved.
      * dbfile: str: path to the OpSim output file, e.g. to a copy of enigma_1189
      * runName: str: run name tag to identify the output of specified OpSim output, e.g. 'enigma1189' 

    Optional Parameters
    -------------------
      * slair: boolean: set to True if analysis on a SLAIR output.
                        Default: False
      * WFDandDDFs: boolean: set to True if want to consider both WFD survey and DDFs. Otherwise will only work
                             with WFD. Default: False
      * noDithOnly: boolean: set to True if only want to consider the undithered survey. Default: False
      * bestDithOnly: boolean: set to True if only want to consider RandomDitherFieldPerVisit.
                               Default: False
      * someDithOnly: boolean: set to True if only want to consider undithered and a few dithered surveys. 
                               Default: False
      * specifiedDith: str: specific dither strategy to run.
                            Default: None
      * nside: int: HEALpix resolution parameter. Default: 128
      * filterBand: str: any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'r'
      * includeDustExtinction: boolean: set to include dust extinction. Default: False
      * saveunMaskedCoaddData: boolean: set to True to save data before border masking. Default: False
      * pixelRadiusForMasking: int: number of pixels to mask along the shallow border. Default: 5

      * cutOffYear: int: year cut to restrict analysis to only a subset of the survey. 
                         Must range from 1 to 9, or None for the full survey analysis (10 yrs).
                         Default: None
      * plotSkymap: boolean: set to True if want to plot skymaps. Default: True
      * plotCartview: boolean: set to True if want to plot cartview plots. Default: True
      * unmaskedColorMin: float: lower limit on the colorscale for unmasked skymaps. Default: None
      * unmaskedColorMax: float: upper limit on the colorscale for unmasked skymaps. Default: None

      * maskedColorMin: float: lower limit on the colorscale for border-masked skymaps. Default: None
      * maskedColorMax: float: upper limit on the colorscale for border-masked skymaps. Default: None
      * nTicks: int: (number of ticks - 1) on the skymap colorbar. Default: 5
      * plotPowerSpectrum: boolean: set to True if want to plot powerspectra. Default: True

      * showPlots: boolean: set to True if want to show figures. Default: True
      * saveFigs: boolean: set to True if want to save figures. Default: True
      
      * almAnalysis: boolean: set to True to perform the alm analysis. Default: True
      * raRange: float array: range of right ascension (in degrees) to consider in alm cartview plot;
                              applicable when almAnalysis=True. Default: [-50,50]
      * decRange: float array: range of declination (in degrees) to consider in alm cartview plot; 
                               applicable when almAnalysis=True. Default: [-65,5]
      * saveMaskedCoaddData: boolean: set to True to save the coadded depth data after the border
                                      masking. Default: True

    """
    # ------------------------------------------------------------------------
    # read in the database
    if slair:
        # slair database
        opsdb = db.Database(dbfile, defaultTable='observations')
    else:
        # OpSim database
        opsdb = db.OpsimDatabase(dbfile)

    # ------------------------------------------------------------------------
    # set up the outDir
    zeropt_tag = ''
    if cutOffYear is not None: zeropt_tag = '%syearCut' % cutOffYear
    else: zeropt_tag = 'fullSurveyPeriod'

    if includeDustExtinction: dust_tag = 'withDustExtinction'
    else: dust_tag = 'noDustExtinction'

    regionType = ''
    if WFDandDDFs: regionType = 'WFDandDDFs_'

    outDir = 'coaddM5Analysis_%snside%s_%s_%spixelRadiusForMasking_%sBand_%s_%s_directory' % (
        regionType, nside, dust_tag, pixelRadiusForMasking, filterBand,
        runName, zeropt_tag)
    print('# outDir: %s' % outDir)
    resultsDb = db.ResultsDb(outDir=outDir)

    # ------------------------------------------------------------------------
    # set up the sql constraint
    if WFDandDDFs:
        if cutOffYear is not None:
            nightCutOff = (cutOffYear) * 365.25
            sqlconstraint = 'night<=%s and filter=="%s"' % (nightCutOff,
                                                            filterBand)
        else:
            sqlconstraint = 'filter=="%s"' % filterBand
    else:
        # set up the propID and units on the ra, dec
        if slair:  # no prop ID; only WFD is simulated.
            wfdWhere = ''
            raDecInDeg = True
        else:
            propIds, propTags = opsdb.fetchPropInfo()
            wfdWhere = '%s and ' % opsdb.createSQLWhere('WFD', propTags)
            raDecInDeg = opsdb.raDecInDeg
        # set up the year cutoff
        if cutOffYear is not None:
            nightCutOff = (cutOffYear) * 365.25
            sqlconstraint = '%snight<=%s and filter=="%s"' % (
                wfdWhere, nightCutOff, filterBand)
        else:
            sqlconstraint = '%sfilter=="%s"' % (wfdWhere, filterBand)
    print('# sqlconstraint: %s' % sqlconstraint)

    # ------------------------------------------------------------------------
    # setup all the slicers
    slicer = {}
    stackerList = {}

    if specifiedDith is not None:  # would like to add all the stackers first and then keep only the one that is specified
        bestDithOnly, noDithOnly = False, False

    if bestDithOnly:
        stackerList['RandomDitherFieldPerVisit'] = [
            mafStackers.RandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
                                                         randomSeed=1000)
        ]
        slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(
            lonCol='randomDitherFieldPerVisitRa',
            latCol='randomDitherFieldPerVisitDec',
            latLonDeg=raDecInDeg,
            nside=nside,
            useCache=False)
    else:
        if slair:
            slicer['NoDither'] = slicers.HealpixSlicer(lonCol='RA',
                                                       latCol='dec',
                                                       latLonDeg=raDecInDeg,
                                                       nside=nside,
                                                       useCache=False)
        else:
            slicer['NoDither'] = slicers.HealpixSlicer(lonCol='fieldRA',
                                                       latCol='fieldDec',
                                                       latLonDeg=raDecInDeg,
                                                       nside=nside,
                                                       useCache=False)
        if someDithOnly and not noDithOnly:
            #stackerList['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
            #                                                                                                          randomSeed=1000)]
            #slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa',
            #                                                                    latCol='repulsiveRandomDitherFieldPerVisitDec',
            #                                                                    latLonDeg=raDecInDeg, nside=nside,
            #                                                                    useCache=False)
            slicer['SequentialHexDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='hexDitherFieldPerNightRa',
                latCol='hexDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['PentagonDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='pentagonDitherPerSeasonRa',
                latCol='pentagonDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
        elif not noDithOnly:
            # random dithers on different timescales
            stackerList['RandomDitherPerNight'] = [
                mafStackers.RandomDitherPerNightStacker(degrees=raDecInDeg,
                                                        randomSeed=1000)
            ]
            stackerList['RandomDitherFieldPerNight'] = [
                mafStackers.RandomDitherFieldPerNightStacker(
                    degrees=raDecInDeg, randomSeed=1000)
            ]
            stackerList['RandomDitherFieldPerVisit'] = [
                mafStackers.RandomDitherFieldPerVisitStacker(
                    degrees=raDecInDeg, randomSeed=1000)
            ]

            # rep random dithers on different timescales
            #stackerList['RepulsiveRandomDitherPerNight'] = [myStackers.RepulsiveRandomDitherPerNightStacker(degrees=raDecInDeg,
            #                                                                                                randomSeed=1000)]
            #stackerList['RepulsiveRandomDitherFieldPerNight'] = [myStackers.RepulsiveRandomDitherFieldPerNightStacker(degrees=raDecInDeg,
            #                                                                                                          randomSeed=1000)]
            #stackerList['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
            #                                                                                                          randomSeed=1000)]
            # set up slicers for different dithers
            # random dithers on different timescales
            slicer['RandomDitherPerNight'] = slicers.HealpixSlicer(
                lonCol='randomDitherPerNightRa',
                latCol='randomDitherPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['RandomDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='randomDitherFieldPerNightRa',
                latCol='randomDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(
                lonCol='randomDitherFieldPerVisitRa',
                latCol='randomDitherFieldPerVisitDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            # rep random dithers on different timescales
            #slicer['RepulsiveRandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherPerNightRa',
            #                                                               latCol='repulsiveRandomDitherPerNightDec',
            #                                                               latLonDeg=raDecInDeg, nside=nside, useCache=False)
            #slicer['RepulsiveRandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerNightRa',
            #                                                                    latCol='repulsiveRandomDitherFieldPerNightDec',
            #                                                                    latLonDeg=raDecInDeg, nside=nside,
            #                                                                    useCache=False)
            #slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa',
            #                                                                    latCol='repulsiveRandomDitherFieldPerVisitDec',
            #                                                                    latLonDeg=raDecInDeg, nside=nside,
            #                                                                    useCache=False)
            # spiral dithers on different timescales
            slicer['FermatSpiralDitherPerNight'] = slicers.HealpixSlicer(
                lonCol='fermatSpiralDitherPerNightRa',
                latCol='fermatSpiralDitherPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['FermatSpiralDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='fermatSpiralDitherFieldPerNightRa',
                latCol='fermatSpiralDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['FermatSpiralDitherFieldPerVisit'] = slicers.HealpixSlicer(
                lonCol='fermatSpiralDitherFieldPerVisitRa',
                latCol='fermatSpiralDitherFieldPerVisitDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            # hex dithers on different timescales
            slicer['SequentialHexDitherPerNight'] = slicers.HealpixSlicer(
                lonCol='hexDitherPerNightRa',
                latCol='hexDitherPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['SequentialHexDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='hexDitherFieldPerNightRa',
                latCol='hexDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['SequentialHexDitherFieldPerVisit'] = slicers.HealpixSlicer(
                lonCol='hexDitherFieldPerVisitRa',
                latCol='hexDitherFieldPerVisitDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            # per season dithers
            slicer['PentagonDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='pentagonDitherPerSeasonRa',
                latCol='pentagonDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['PentagonDiamondDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='pentagonDiamondDitherPerSeasonRa',
                latCol='pentagonDiamondDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['SpiralDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='spiralDitherPerSeasonRa',
                latCol='spiralDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
    if specifiedDith is not None:
        stackerList_, slicer_ = {}, {}
        if specifiedDith in slicer.keys():
            # only Random dithers have a stacker object for rand seed specification
            if 'Random' in specifiedDith:
                stackerList_[specifiedDith] = stackerList[specifiedDith]
            slicer_[specifiedDith] = slicer[specifiedDith]
        else:
            raise ValueError(
                'Invalid value for specifiedDith: %s. Allowed values include one of the following:\n%s'
                % (specifiedDith, list(slicer.keys())))
        stackerList, slicer = stackerList_, slicer_
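    # For example (a usage sketch, not from the original docstring): calling
    # this function with specifiedDith='SequentialHexDitherPerNight' trims
    # stackerList and slicer down to that single dithering strategy, so only
    # one map is computed below.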

    # ------------------------------------------------------------------------
    if slair:
        m5Col = 'fivesigmadepth'
    else:
        m5Col = 'fiveSigmaDepth'
    # set up the metric
    if includeDustExtinction:
        # include dust extinction when calculating the co-added depth
        coaddMetric = metrics.ExgalM5(m5Col=m5Col, lsstFilter=filterBand)
    else:
        coaddMetric = metrics.Coaddm5Metric(m5Col=m5Col)
    dustMap = maps.DustMap(
        interp=False, nside=nside
    )  # always pass the dustMap; whether dust is actually applied is decided by the metric chosen above

    # ------------------------------------------------------------------------
    # set up the bundle
    coaddBundle = {}
    for dither in slicer:
        if dither in stackerList:
            coaddBundle[dither] = metricBundles.MetricBundle(
                coaddMetric,
                slicer[dither],
                sqlconstraint,
                stackerList=stackerList[dither],
                runName=runName,
                metadata=dither,
                mapsList=[dustMap])
        else:
            coaddBundle[dither] = metricBundles.MetricBundle(
                coaddMetric,
                slicer[dither],
                sqlconstraint,
                runName=runName,
                metadata=dither,
                mapsList=[dustMap])

    # ------------------------------------------------------------------------
    # run the analysis
    if includeDustExtinction:
        print('\n# Running coaddBundle with dust extinction ...')
    else:
        print('\n# Running coaddBundle without dust extinction ...')
    cGroup = metricBundles.MetricBundleGroup(coaddBundle,
                                             opsdb,
                                             outDir=outDir,
                                             resultsDb=resultsDb,
                                             saveEarly=False)
    cGroup.runAll()

    # ------------------------------------------------------------------------
    # plot and save the data
    plotBundleMaps(path,
                   outDir,
                   coaddBundle,
                   dataLabel='$%s$-band Coadded Depth' % filterBand,
                   filterBand=filterBand,
                   dataName='%s-band Coadded Depth' % filterBand,
                   skymap=plotSkymap,
                   powerSpectrum=plotPowerSpectrum,
                   cartview=plotCartview,
                   colorMin=unmaskedColorMin,
                   colorMax=unmaskedColorMax,
                   nTicks=nTicks,
                   showPlots=showPlots,
                   saveFigs=saveFigs,
                   outDirNameForSavedFigs='coaddM5Plots_unmaskedBorders')
    print('\n# Done saving plots without border masking.\n')

    # ------------------------------------------------------------------------
    plotHandler = plots.PlotHandler(outDir=outDir,
                                    resultsDb=resultsDb,
                                    thumbnail=False,
                                    savefig=False)

    print(
        '# Number of pixels in the survey region (before masking the border):')
    for dither in coaddBundle:
        print(
            '  %s: %s' %
            (dither,
             len(np.where(~coaddBundle[dither].metricValues.mask)[0])))

    # ------------------------------------------------------------------------
    # save the unmasked data?
    if saveunMaskedCoaddData:
        outDir_new = 'unmaskedCoaddData'
        if not os.path.exists('%s%s/%s' % (path, outDir, outDir_new)):
            os.makedirs('%s%s/%s' % (path, outDir, outDir_new))
        saveBundleData_npzFormat('%s%s/%s' % (path, outDir, outDir_new),
                                 coaddBundle, 'coaddM5Data_unmasked',
                                 filterBand)

    # ------------------------------------------------------------------------
    # mask the edges
    print('\n# Masking the edges for coadd ...')
    coaddBundle = maskingAlgorithmGeneralized(
        coaddBundle,
        plotHandler,
        dataLabel='$%s$-band Coadded Depth' % filterBand,
        nside=nside,
        pixelRadius=pixelRadiusForMasking,
        plotIntermediatePlots=False,
        plotFinalPlots=False,
        printFinalInfo=True)
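    # Note that pixelRadius=0 leaves the border unmasked, which is why the
    # masked-data plotting below is guarded by pixelRadiusForMasking > 0.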
    if (pixelRadiusForMasking > 0):
        # plot and save the masked data
        plotBundleMaps(path,
                       outDir,
                       coaddBundle,
                       dataLabel='$%s$-band Coadded Depth' % filterBand,
                       filterBand=filterBand,
                       dataName='%s-band Coadded Depth' % filterBand,
                       skymap=plotSkymap,
                       powerSpectrum=plotPowerSpectrum,
                       cartview=plotCartview,
                       colorMin=maskedColorMin,
                       colorMax=maskedColorMax,
                       nTicks=nTicks,
                       showPlots=showPlots,
                       saveFigs=saveFigs,
                       outDirNameForSavedFigs='coaddM5Plots_maskedBorders')
        print('\n# Done saving plots with border masking. \n')

    # ------------------------------------------------------------------------
    # Calculate total power
    summarymetric = metrics.TotalPowerMetric()
    for dither in coaddBundle:
        coaddBundle[dither].setSummaryMetrics(summarymetric)
        coaddBundle[dither].computeSummaryStats()
        print('# Total power for %s case is %f.' %
              (dither, coaddBundle[dither].summaryValues['TotalPower']))
    print('')
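    # A sketch (not part of the original pipeline): additional summary
    # statistics could be attached the same way, e.g. the median coadded
    # depth for each dither strategy:
    # for dither in coaddBundle:
    #     coaddBundle[dither].setSummaryMetrics([summarymetric, metrics.MedianMetric()])
    #     coaddBundle[dither].computeSummaryStats()
    #     print(coaddBundle[dither].summaryValues)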

    # ------------------------------------------------------------------------
    # run the alm analysis
    if almAnalysis:
        almPlots(path,
                 outDir,
                 copy.deepcopy(coaddBundle),
                 nside=nside,
                 filterband=filterBand,
                 raRange=raRange,
                 decRange=decRange,
                 showPlots=showPlots)
    # ------------------------------------------------------------------------
    # save the masked data?
    if saveMaskedCoaddData and (pixelRadiusForMasking > 0):
        outDir_new = 'maskedCoaddData'
        if not os.path.exists('%s%s/%s' % (path, outDir, outDir_new)):
            os.makedirs('%s%s/%s' % (path, outDir, outDir_new))
        saveBundleData_npzFormat('%s%s/%s' % (path, outDir, outDir_new),
                                 coaddBundle, 'coaddM5Data_masked', filterBand)

    # ------------------------------------------------------------------------
    # plot comparison plots
    if len(coaddBundle) > 1:  # more than one dither strategy to compare
        # set up the directory
        outDir_comp = 'coaddM5ComparisonPlots'
        if not os.path.exists('%s%s/%s' % (path, outDir, outDir_comp)):
            os.makedirs('%s%s/%s' % (path, outDir, outDir_comp))
        # ------------------------------------------------------------------------
        # plot for the power spectra
        cl = {}
        for dither in plotColor:
            if dither in coaddBundle:
                cl[dither] = hp.anafast(hp.remove_dipole(
                    coaddBundle[dither].metricValues.filled(
                        coaddBundle[dither].slicer.badval)),
                                        lmax=500)
                ell = np.arange(np.size(cl[dither]))
                plt.plot(ell, (cl[dither] * ell * (ell + 1)) / 2.0 / np.pi,
                         color=plotColor[dither],
                         linestyle='-',
                         label=dither)
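        # Each curve is the angular power spectrum of a coadded-depth map:
        # hp.remove_dipole takes out the monopole and dipole so large-scale
        # offsets do not dominate, hp.anafast returns C_ell up to lmax=500,
        # and the y-axis uses the conventional ell*(ell+1)*C_ell/(2*pi) scaling.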
        plt.xlabel(r'$\ell$')
        plt.ylabel(r'$\ell(\ell+1)C_\ell/(2\pi)$')
        plt.xlim(0, 500)
        fig = plt.gcf()
        fig.set_size_inches(12.5, 10.5)
        leg = plt.legend(labelspacing=0.001)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(4.0)
        filename = 'powerspectrum_comparison_all.png'
        plt.savefig('%s%s/%s/%s' % (path, outDir, outDir_comp, filename),
                    bbox_inches='tight',
                    format='png')
        plt.show()

        # create the histogram
        scale = hp.nside2pixarea(nside, degrees=True)

        def tickFormatter(y, pos):
            return '%d' % (y * scale)  # convert pixel count to area
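        # For example (illustrative numbers only): at nside=256 each HEALPix
        # pixel covers ~0.0525 deg^2, so a tick at y=1000 pixels gets labeled
        # as 52 deg^2.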

        binsize = 0.01
        for dither in plotColor:
            if dither in coaddBundle:
                ind = np.where(~coaddBundle[dither].metricValues.mask)[0]
                binAll = int(
                    (max(coaddBundle[dither].metricValues.data[ind]) -
                     min(coaddBundle[dither].metricValues.data[ind])) /
                    binsize)
                plt.hist(coaddBundle[dither].metricValues.data[ind],
                         bins=binAll,
                         label=dither,
                         histtype='step',
                         color=plotColor[dither])
        ax = plt.gca()
        ymin, ymax = ax.get_ylim()
        nYticks = 10.
        wantedYMax = ymax * scale
        wantedYMax = 10. * np.ceil(float(wantedYMax) / 10.)
        increment = 5. * np.ceil(float(wantedYMax / nYticks) / 5.)
        wantedArray = np.arange(0, wantedYMax, increment)
        ax.yaxis.set_ticks(wantedArray / scale)
        ax.yaxis.set_major_formatter(FuncFormatter(tickFormatter))
        plt.xlabel('$%s$-band Coadded Depth' % filterBand)
        plt.ylabel('Area (deg$^2$)')
        fig = plt.gcf()
        fig.set_size_inches(12.5, 10.5)
        leg = plt.legend(labelspacing=0.001, loc=2)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(2.0)
        filename = 'histogram_comparison.png'
        plt.savefig('%s%s/%s/%s' % (path, outDir, outDir_comp, filename),
                    bbox_inches='tight',
                    format='png')
        plt.show()
        # ------------------------------------------------------------------------
        # plot power spectra for the separate panels
        totKeys = len(list(coaddBundle.keys()))
        if (totKeys > 1):
            plt.clf()
            nCols = 2
            nRows = int(np.ceil(float(totKeys) / nCols))
            fig, ax = plt.subplots(nRows, nCols)
            plotRow = 0
            plotCol = 0
            for dither in list(plotColor.keys()):
                if dither in list(coaddBundle.keys()):
                    ell = np.arange(np.size(cl[dither]))
                    ax[plotRow, plotCol].plot(ell, (cl[dither] * ell *
                                                    (ell + 1)) / 2.0 / np.pi,
                                              color=plotColor[dither],
                                              label=dither)
                    if (plotRow == nRows - 1):
                        ax[plotRow, plotCol].set_xlabel(r'$\ell$')
                    ax[plotRow,
                       plotCol].set_ylabel(r'$\ell(\ell+1)C_\ell/(2\pi)$')
                    ax[plotRow,
                       plotCol].yaxis.set_major_locator(MaxNLocator(3))
                    if (dither != 'NoDither'):
                        ax[plotRow, plotCol].set_ylim(0, 0.0035)
                    ax[plotRow, plotCol].set_xlim(0, 500)
                    plotRow += 1
                    if (plotRow > nRows - 1):
                        plotRow = 0
                        plotCol += 1
            fig.set_size_inches(20, int(nRows * 30 / 7.))
            filename = 'powerspectrum_sepPanels.png'
            plt.savefig('%s%s/%s/%s' % (path, outDir, outDir_comp, filename),
                        bbox_inches='tight',
                        format='png')
            plt.show()
    return coaddBundle, outDir
Example n. 28
def go(nside=64, rmag=20., SedTemplate='flat', DoRun=False, LFilters = [], \
           LNightMax=[], nightMax=1e4, \
           CustomPlotLimits=True, \
           RunOne=False, MaxRuns=1e3, \
           SpatialClip=95.):

    # runNames = ['enigma_1189', 'ops2_1093']

    # runNames
    #runNames = ['ops2_1092', 'kraken_1038', 'kraken_1034', 'ops2_1098']
    #runNames = ['kraken_1038', 'kraken_1034', 'ops2_1098']

    # 2015-12-23 - put kraken_1038 at the end, it seems to run
    # extremely slowly...
    runNames = ['enigma_1189', 'ops2_1098', 'kraken_1034', 'kraken_1038']

    runNames = ['ops2_1092', 'kraken_1033', 'enigma_1271']

    # UPDATE - ops2_1092 ran quite quickly on nside=32... rerun on 64

    runNames = ['ops2_1092', 'enigma_1189', 'enigma_1271', 'kraken_1038']
    
    # UPDATE 2015-12-28 -- run with single-filter choices, compare
    # enigma to ops2_1092

    # WIC 2015-12-28 -- try with single-filter and all then small subset
    runNames = ['ops2_1092', 'ops2_1092', 'ops2_1092', 'enigma_1189', 'enigma_1189', 'enigma_1189']
    LFilters = ["u", "y", '',   "u", "y", '']
    LNightMax = [1e4, 1e4, 730, 1e4, 1e4, 730]

    # WIC 2015-12-28 - 23:00 - try using a different SED template,
    # just go with single filters for now
    #
    # DO WE NEED THIS??
    

    # WIC 2015-12-28 - 22:00; much to my surprise, that took less than
    # half an hour to go all the way through. Try again, this time using slightly more filters.    
    runNames = ['ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189']
    LFilters = ['', '', 'griz', 'griz']  # (griz was not recognized)

    # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits
    # Break the specifications across lines to make subdivision easier
    
    # Subsets by time first, then by filter, finally the whole shebang
    
    # (Yes the inversion of the first two is deliberate.)
    runNames = ['enigma_1189', 'ops2_1092', 'ops2_1092', 'enigma_1189', \
                    'ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189', \
                    'ops2_1092', 'enigma_1189']
    LFilters = ['', '', '', '', \
                    'u', 'u', 'y', 'y', \
                    '', '']  
    LNightMax = [365, 365, 730, 730, \
                     1e4, 1e4, 1e4, 1e4, \
                     1e4, 1e4]

    # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good
    LUpperParallax = []
    LUpperPropmotion = []

    if CustomPlotLimits:
    
        LUpperParallax = [10, 10, 10, 10, \
                              10, 10, 40, 40, \
                              3.0, 3.0 ]
    
        # For proper motion, it's a little tricky to say because the
        # regular case is so pathological for the field. Try the following:
        LUpperPropmotion = [40, 40, 5, 20, \
                                3.5, 20, 3.5, 20, \
                                0.5, 5]


    print "runAstrom.go INFO - will run the following:"
    for iSho in range(len(runNames)):
        print "%i: %-12s, %1s, %i" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho])
    print "==========================="

#    print runNames
#    if not DoRun:
#        print "Set DoRun=True to actually run this."
#        print len(LFilters), len(runNames), len(LFilters) == len(runNames)
#        return



    # nside = 64

    slicer = slicers.HealpixSlicer(nside=nside)

    # Make it so we don't bother with the silly power spectra
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # WIC - back up the plotting arguments with a default value
    plotFuncsPristine = copy.deepcopy(plotFuncs)

    # WIC - the only way this will make sense to me is if I make a
    # dictionary of plot arguments. Let's try it...
    DPlotArgs = {}
    for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']:
        DPlotArgs[plotArg] = copy.deepcopy(plotFuncs)

    if CustomPlotLimits:

        # All spatial maps use percentile clipping
        for plotMetric in DPlotArgs.keys():
            DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip

        # Some limits common to spatial maps and histograms
        for iPl in range(0,2):
            DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True
            
        # Standardized range for the histograms for new parallax metrics
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] =  1.
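        # (the HA-degeneracy statistic is a correlation coefficient, hence the
        # fixed [-1, 1] histogram range)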
            
        # Standardize at least the lower bound of the histogram in
        # both the proper motion and parallax errors. Upper limit we
        # can customize with a loop.
        DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2  # should not be zero if log scale!!
        DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0.


    # WIC - try changing the plot dictionary

    if not DoRun:
        plotFuncs[0].defaultPlotDict['logScale'] = True
        print(DPlotArgs['propmotion'][0].defaultPlotDict)
        print(DPlotArgs['propmotion'][1].defaultPlotDict)
        
        return

    # The old runs have the seeing in finSeeing
    seeingCol = 'finSeeing'

    # Try it out for a 20th mag star with a flat SED (can change mag
    # or to OBAFGKM)
    # rmag = 20. ## NOW AN ARGUMENT
    #SedTemplate='flat'

    # Use all the observations. Can change if you want a different
    # time span
    sqlconstraint = ''

    # The per-run SQL constraint is now built inside the loop below.

    # run some summary stats on everything
    summaryMetrics = [metrics.MedianMetric()]

    tStart = time.time()

    # Running one, or the whole lot?
    RunMax = len(runNames)

    # allow user to set a different number (say, 2)
    if MaxRuns < RunMax and MaxRuns > 0:
        RunMax = int(MaxRuns)

    # the following keyword overrides
    if RunOne:
        RunMax = 1

    print "Starting runs. RunMax = %i" % (RunMax)

    for iRun in range(RunMax):
        run = runNames[iRun][:]

    # for run in runNames:
        # Open the OpSim database
        timeStartIteration = time.time()

        # Some syntax added to test for existence of the database
        dbFil = run+'_sqlite.db'
        if not os.access(dbFil, os.R_OK):
            print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil)
            print "runAstrom.go FATAL - skipping run %s" % (run)
            continue
    
        else:
            deltaT = time.time()-tStart
            print "runAstrom.go INFO - ##################################"
            print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \
                % (run, nside, deltaT/60.)

        opsdb = db.OpsimDatabase(dbFil)

        # Set SQL constraint appropriate for each filter in the
        # list. If we supplied a list of filters, use it for 
        sqlconstraint = ''
        ThisFilter = 'ugrizy'
        if len(LFilters) == len(runNames):

            # Only change the filter if one was actually supplied!
            if len(LFilters[iRun]) == 1:
                ThisFilter = LFilters[iRun]
                sqlconstraint = 'filter = "%s"' % (ThisFilter)

        # If nightmax was supplied, use it 
        ThisNightMax = int(nightMax)  # copy not view
        if len(LNightMax) == len(runNames):

            # Only update nightmax if one was given
            try:
                ThisNightMax = int(LNightMax[iRun])  # This might be redundant with the fmt statement below.
                if len(sqlconstraint) < 1:
                    sqlconstraint = 'night < %i' % (ThisNightMax)
                else:
                    sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax)
            except (TypeError, ValueError):
                print("runAstrom.go WARN - run %i problem with NightMax" % (iRun))
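        # e.g. with LFilters[iRun]='u' and LNightMax[iRun]=730, the constraint
        # built above reads: filter = "u" and night < 730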

        # Set where the output should go - include the filter!! 
        sMag = '%.1f' % (rmag)
        sMag = sMag.replace(".","p")
        outDir = '%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag)
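        # e.g. run='enigma_1189', nside=64, ThisFilter='u', ThisNightMax=10000
        # and rmag=20.0 give outDir='enigma_1189_nside64_u_n10000_r20p0'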

        # Ensure we'll be able to find this later on...
        if CustomPlotLimits:
            outDir = '%s_lims' % (outDir)

        # From this point onwards, stuff actually gets run. This is
        # the place to output what will actually happen next.
        print "runAstrom.go INFO - about to run:"
        print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside)
        print "runAstrom.go INFO - output directory will be %s" % (outDir)
        if not DoRun:
            continue

        resultsDb = db.ResultsDb(outDir=outDir)
        bundleList = []

        # WIC - to make this at least somewhat uniform, build the plot
        # functions including arguments out of our copies above.
        plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion'])
        plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax'])
        plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage'])
        plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen'])

        # if using custom plot limits, will want to include the limits
        # for proper motion and parallax too... programming a bit defensively 
        # here, including an extra check (rather than just the length of the lists 
        # above). 
        if CustomPlotLimits:
            if len(LUpperParallax) == len(runNames):
                plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun])

            if len(LUpperPropmotion) == len(runNames):
                plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun])

        # Configure the metrics
        metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
                                            plotFuncs=plotFuncsParallax,
                                            summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
                                            plotFuncs=plotFuncsPropmotion,
                                            summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
                                            plotFuncs=plotFuncsCoverage,
                                            summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
                                            plotFuncs=plotFuncsHAdegen,
                                            summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Run everything and make plots
        bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
        bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
#        try:
        bgroup.runAll()

        print "runAstrom.go INFO - bundles took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)

#        except KeyboardInterrupt:
#            print "runAstrom.go FATAL - keyboard interrupt detected. Halting."
#            return
        bgroup.plotAll()

        print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)
        

    print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
Example n. 29
def run(config_filename):
    # YAML input file.
    with open(config_filename) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # print(config)
    outDir = 'Test'  # this is for MAF

    # grab the db filename from yaml input file
    dbFile = config['Observations']['filename']

    """
    conn = sqlite3.connect(dbFile)
    cur = conn.cursor()
    table_name='Proposal'
    result = cur.execute("PRAGMA table_info('%s')" % table_name).fetchall()
    print('Results',result)

    cur.execute("SELECT * FROM Proposal")
    rows = cur.fetchall()
    for row in rows:
        print(row)
    print('end')
    cur.execute('PRAGMA TABLE_INFO({})'.format('ObsHistory'))

    names = [tup[1] for tup in cur.fetchall()]
    print(names)
    """
    opsimdb = db.OpsimDatabase(dbFile)
    # version = opsimdb.opsimVersion
    propinfo, proptags = opsimdb.fetchPropInfo()
    print('proptags and propinfo', proptags, propinfo)

    # grab the fieldtype (DD or WFD) from yaml input file
    fieldtype = config['Observations']['fieldtype']
    fake_file = config['Fake_file']
    module = import_module(config['Metric'])

    slicer = slicers.HealpixSlicer(nside=config['Pixelisation']['nside'])

    sqlconstraint = opsimdb.createSQLWhere(fieldtype, proptags)

    bundles = []
    names = []
    lim_sn = {}
    bands = config['Observations']['bands']
    z = config['Observations']['z']
    metric = {}
    # processing, band after band

    Ra_ref = 0.000
    Dec_ref = -2.308039
    time_ref = time.time()
    for band in bands:
        sql_i = sqlconstraint+' AND '
        sql_i += 'filter = "%s"' % (band)
        # sql_i += ' AND abs(fieldRA-(%f))< %f' % (Ra_ref, 1.e-2)+' AND '
        # sql_i += 'abs(fieldDec-(%f))< %f' % (Dec_ref, 1.e-2)

        lim_sn[band] = ReferenceData(
            config['Li file'], config['Mag_to_flux file'], band, z)

        metric[band] = module.SNSNRMetric(
            lim_sn=lim_sn[band],
            names_ref=config['names_ref'],
            fake_file=fake_file,
            coadd=config['Observations']['coadd'],
            z=z,
            display=config['Display_Processing'],
            season=config['Observations']['season'])
        bundles.append(metricBundles.MetricBundle(metric[band], slicer, sql_i))
        names.append(band)

    bdict = dict(zip(names, bundles))

    resultsDb = db.ResultsDb(outDir=outDir)
    mbg = metricBundles.MetricBundleGroup(bdict, opsimdb,
                                          outDir=outDir, resultsDb=resultsDb)

    mbg.runAll()

    # Let us display the results

    for band, val in bdict.items():
        metValues = val.metricValues[~val.metricValues.mask]
        res = None
        for vals in metValues:
            if res is None:
                res = vals
            else:
                res = np.concatenate((res, vals))
        res = np.unique(res)
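        # (a more compact, equivalent form of the loop above, assuming at
        # least one unmasked value: res = np.unique(np.concatenate(list(metValues))))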

        """
        sn_plot.detecFracPlot(res, config['Pixelisation']
                              ['nside'], config['names_ref'])

        sn_plot.detecFracHist(res, config['names_ref'])
        """
    plt.show()
Example n. 30
# asciiLC is the template light curve
asciiLC = 'supernova1b_template.dat'
filterNames = ['u', 'g', 'r', 'i', 'z']
colors = {'u': 'purple', 'g': 'g', 'r': 'r', 'i': 'blue', 'z': 'm'}
#create list of peak days and magnitudes to iterate over
day_of_peak = np.arange(59580, 63232, 30)
mag_of_peak = np.arange(17, 25, 1)
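# As the comment above notes, the two grids are meant to be iterated over;
# a minimal sketch of such a loop:
#   for peak_day in day_of_peak:
#       for peak_mag in mag_of_peak:
#           pass  # evaluate the template light curve peaking at (peak_day, peak_mag)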

# Set the database and query
runName = 'minion_1018'
opsdb = db.OpsimDatabase(runName + '_sqlite.db')

# Set the output directory
outDir = 'Observations Dictionary'
resultsDb = db.ResultsDb(outDir=outDir)

# This creates our database of observations. The pass metric just passes data straight through.
metric = metrics.PassMetric(cols=['expMJD', 'filter', 'fiveSigmaDepth'])
"""use slicer to restrict the ra and decs, use np.random.uniform to get random points, 
	first coordinate represents ra and second dec. Or, give a list of specific
	ra and decs - the second slicer is for the deep drilling fields. One must be commented out."""
#slicer = slicers.UserPointsSlicer(np.random.uniform(0,360,1000), np.random.uniform(-80,0,1000))
slicer = slicers.UserPointsSlicer([349.4, 0.00, 53.0, 34.4, 150.4],
                                  [-63.3, -45.5, -27.4, -5.1, 2.8])
# sql is empty as there are no restrictions currently
sql = ''
bundle = metricBundles.MetricBundle(metric, slicer, sql)
bg = metricBundles.MetricBundleGroup({0: bundle},
                                     opsdb,
                                     outDir=outDir,