Example #1
def connectOpsimDb(database, summaryOnly=False, summaryTable='summary'):
    """
    Convenience function to handle connecting to database.

    Parameters
    ----------
    database : str
        The path to the OpSim sqlite database file.
    summaryOnly : bool
        If True, the opsim database is expected to contain only a summary table;
        if False, a sqlite database file with all tables is expected.
    summaryTable : str
        The name of the summary table.

    Returns
    -------
    OpsimDatabase
    """
    import lsst.sims.maf.db as db
    if summaryOnly:
        # Connect to just the summary table (might be sqlite created from flat dat output file).
        opsimdb = db.OpsimDatabase(database=database,
                                   dbTables={'Summary':[summaryTable, 'obsHistID']},
                                   defaultdbTables=None)
    else:
        # For a basic db connection to the sqlite db files.
        opsimdb = db.OpsimDatabase(database=database)
    return opsimdb
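
A minimal usage sketch (the database paths here are hypothetical):

opsimdb = connectOpsimDb('baseline2018a.db')
summdb = connectOpsimDb('summary_only.db', summaryOnly=True, summaryTable='summary')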
Example #2
def compute_metric(params):
    """Function to execute the metric calculation when code is called from
    the commandline"""

    obsdb = db.OpsimDatabase('../../tutorials/baseline2018a.db')
    outputDir = '/home/docmaf/'
    resultsDb = db.ResultsDb(outDir=outputDir)

    (propids, proptags) = obsdb.fetchPropInfo()
    surveyWhere = obsdb.createSQLWhere(params['survey'], proptags)

    obs_params = {
        'filters': params['filters'],
        'cadence': params['cadence'],
        'start_date': params['start_date'],
        'end_date': params['end_date']
    }

    metric = CadenceOverVisibilityWindowMetric(**obs_params)
    slicer = slicers.HealpixSlicer(nside=64)
    sqlconstraint = surveyWhere
    bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint)

    bgroup = metricBundles.MetricBundleGroup({0: bundle},
                                             obsdb,
                                             outDir='newmetric_test',
                                             resultsDb=resultsDb)
    bgroup.runAll()
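
A hypothetical invocation of compute_metric; it assumes lsst.sims.maf.db as db, slicers, metricBundles, and CadenceOverVisibilityWindowMetric are already imported in the calling module, and all parameter values are illustrative:

params = {'survey': 'WFD',
          'filters': ['g', 'r', 'i'],
          'cadence': 3.0,           # days, illustrative
          'start_date': 59853.0,    # MJD, illustrative
          'end_date': 60218.0}      # MJD, illustrative
compute_metric(params)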
Example #3
    def testOut(self):
        """
        Check that the metric bundle can generate the expected output
        """
        slicer = slicers.HealpixSlicer(nside=8)
        metric = metrics.MeanMetric(col='airmass')
        sql = 'filter="r"'

        metricB = metricBundles.MetricBundle(metric, slicer, sql)
        filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')

        database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
        opsdb = db.OpsimDatabase(database=database)
        resultsDb = db.ResultsDb(outDir=self.outDir)

        bgroup = metricBundles.MetricBundleGroup({0: metricB},
                                                 opsdb,
                                                 outDir=self.outDir,
                                                 resultsDb=resultsDb)
        bgroup.runAll()
        bgroup.plotAll()
        bgroup.writeAll()

        outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
        outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
        outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))

        # By default, make 3 plots for healpix
        assert (len(outThumbs) == 3)
        assert (len(outPdf) == 3)
        assert (len(outNpz) == 1)
Example #4
def connect_dbs(dbDir, outDir, dbRuns=None):
    """
    Initialize database objects for all OpSim databases in the provided
    directory. Returns a dictionary containing all database connections and a
    dictionary holding the ResultsDb objects.

    Args:
        dbDir(str): The path to the database directory.
        outDir(str): The path to the result database directory.
        dbRuns(list, optional): Run names to connect to; if None, connect to
            all .db files in dbDir.

    Returns:
        opSimDbs(dict): A dictionary containing the OpsimDatabase objects for
            opsim databases in the provided directory; keys are the run names.
        resultDbs(dict): A dictionary containing the ResultsDb objects for opsim
            databases in the provided directory; keys are the run names.
    """
    opSimDbs = {}
    resultDbs = {}

    if dbRuns is None:
        dbDir = os.path.abspath(dbDir)
        db_list = glob.glob(dbDir + '/*.db')
    else:
        db_list = [os.path.join(dbDir, dbRun + '.db') for dbRun in dbRuns]

    for dbPath in db_list:
        dbName = os.path.basename(dbPath)
        opSimDbs[os.path.splitext(dbName)[0]] = db.OpsimDatabase(dbPath)
        resultDbs[os.path.splitext(dbName)[0]] = \
            db.ResultsDb(outDir=outDir,
                         database=os.path.splitext(dbName)[0]+'_result.db')
    return (opSimDbs, resultDbs)
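
A usage sketch, assuming a directory of OpSim .db files (paths hypothetical):

opSimDbs, resultDbs = connect_dbs('./opsim_dbs', './maf_results')
print(list(opSimDbs.keys()))  # run names, e.g. ['baseline2018a']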
Example #5
    def __getOpSimMjd(self, opsim, ra, dec, fil):
        colmn = 'observationStartMJD'
        opsdb = db.OpsimDatabase(opsim)

        # Directory where temporary files will be stored (TODO: eliminate this)
        outDir = 'TmpDir'
        resultsDb = db.ResultsDb(outDir=outDir)

        metric = metrics.PassMetric(cols=[colmn, 'fiveSigmaDepth', 'filter'])
        slicer = slicers.UserPointsSlicer(ra=ra, dec=dec)
        sqlconstraint = 'filter = \'' + fil + '\''

        bundle = mb.MetricBundle(metric, slicer, sqlconstraint, runName='name')
        bgroup = mb.MetricBundleGroup({0: bundle},
                                      opsdb,
                                      outDir=outDir,
                                      resultsDb=resultsDb)
        bgroup.runAll()

        filters = np.unique(bundle.metricValues[0]['filter'])
        mv = bundle.metricValues[0]

        # Get dates
        mjd = mv[colmn]
        mjd = np.sort(mjd)
        print('Num of visits ' + str(len(mjd)) + ' ' + opsim)
        return mjd
Example #6
def load_and_run():
    dbFile = 'baseline_nexp2_v1.7_10yrs.db'
    opsimdb = db.OpsimDatabase(dbFile)
    runName = dbFile.replace('.db', '')

    nside = 64
    slicer = slicers.HealpixSlicer(nside=nside)

    metric = SNNSNMetric(verbose=False)  #, zlim_coeff=0.98)

    bundleList = []

    #sql = ''
    sql = '(note = "%s")' % ('DD:COSMOS')

    bundleList.append(
        metricBundles.MetricBundle(metric, slicer, sql, runName=runName))

    outDir = 'temp'
    resultsDb = db.ResultsDb(outDir=outDir)
    bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
    bgroup = metricBundles.MetricBundleGroup(bundleDict,
                                             opsimdb,
                                             outDir=outDir,
                                             resultsDb=resultsDb)
    bgroup.runAll()
    bgroup.plotAll()
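
If this example is saved as a script, a standard entry-point guard runs it (the .db file above must exist locally):

if __name__ == '__main__':
    load_and_run()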
Example #7
def runChips(useCamera=False):
    import numpy as np
    import lsst.sims.maf.slicers as slicers
    import lsst.sims.maf.metrics as metrics
    import lsst.sims.maf.metricBundles as metricBundles
    import lsst.sims.maf.db as db
    from lsst.sims.maf.plots import PlotHandler
    import matplotlib.pylab as plt
    import healpy as hp


    print('Camera setting = ', useCamera)

    database = 'enigma_1189_sqlite.db'
    sqlWhere = 'filter = "r" and night < 800 and fieldRA < %f and fieldDec > %f and fieldDec < 0' % (np.radians(15), np.radians(-15))
    opsdb = db.OpsimDatabase(database)
    outDir = 'Camera'
    resultsDb = db.ResultsDb(outDir=outDir)

    nside = 512
    tag = 'F'
    if useCamera:
        tag = 'T'
    metric = metrics.CountMetric('expMJD', metricName='chipgap_%s' % tag)

    slicer = slicers.HealpixSlicer(nside=nside, useCamera=useCamera)
    bundle1 = metricBundles.MetricBundle(metric, slicer, sqlWhere)

    bg = metricBundles.MetricBundleGroup({0: bundle1}, opsdb, outDir=outDir, resultsDb=resultsDb)
    bg.runAll()
    hp.gnomview(bundle1.metricValues, xsize=800, ysize=800, rot=(7, -7, 0), unit='Count', min=1)
    plt.savefig(outDir + '/fig' + tag + '.png')
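
A hypothetical comparison run, with and without the camera footprint:

runChips(useCamera=False)  # writes Camera/figF.png
runChips(useCamera=True)   # writes Camera/figT.png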
Example #8
def run_maf(dbFile, ra, dec):
    """Retrive min inter_night gap, and observation history with the input of database file name and arrays of RA and DEC.

    Note: the observing cadence returned are not ordered by date!! 
    """

    # establish connection to the sqlite database file.
    opsimdb = db.OpsimDatabase(dbFile)

    # While we're in transition between opsim v3 and v4, this may be helpful:
    # print("{dbFile} is an opsim version {version} database".format(dbFile=dbFile, version=opsimdb.opsimVersion))
    if opsimdb.opsimVersion == "V3":
        # For v3 databases:
        mjdcol = 'expMJD'
        degrees = False
        cols = ['filter', 'fiveSigmaDepth', mjdcol, 'expDate']
        stackerList = []
    else:
        # For v4 and alternate scheduler databases.
        mjdcol = 'observationStartMJD'
        degrees = True
        cols = ['filter', 'fiveSigmaDepth', mjdcol]
        stackerList = [expDateStacker()]

    # IntraNightGapsMetric returns the gap (in days) between observations within
    # the same night; use a custom reduceFunc to find the minimum gaps.
    metric = metrics.cadenceMetrics.IntraNightGapsMetric(reduceFunc=np.amin,
                                                         mjdCol=mjdcol)
    # PassMetric simply passes through all values
    metric_pass = metrics.simpleMetrics.PassMetric(cols=cols)
    # slicer for slicing pointing history
    slicer = slicers.UserPointsSlicer(ra,
                                      dec,
                                      lonCol='fieldRA',
                                      latCol='fieldDec',
                                      latLonDeg=degrees)
    # SQL constraints (3 for baseline2018a, 1 for rolling m2045)
    sql = ''

    # bundles to combine metric, slicer and SQL constraint together
    bundle = metricBundles.MetricBundle(metric, slicer, sql)
    date_bundle = metricBundles.MetricBundle(metric_pass,
                                             slicer,
                                             sql,
                                             stackerList=stackerList)

    # create the metric bundle group, run it, and return
    bg = metricBundles.MetricBundleGroup(
        {
            'sep': bundle,
            'cadence': date_bundle
        },
        opsimdb,
        outDir=outDir,
        resultsDb=resultsDb)
    bg.runAll()
    opsimdb.close()
    return bg
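
A hypothetical call; note that run_maf relies on outDir and resultsDb being defined at module level in its original source, so they are set explicitly here:

outDir = 'maf_out'
resultsDb = db.ResultsDb(outDir=outDir)
bg = run_maf('baseline2018a.db', np.array([150.0]), np.array([2.0]))
min_gaps = bg.bundleDict['sep'].metricValues  # minimum inter-night gaps (days)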
Example #9
    def testOpsimDbSetup(self):
        """Test opsim specific database class setup/instantiation."""
        # Test tables were connected to.
        self.assertTrue(isinstance(self.oo.tables, dict))
        self.assertEqual(self.oo.dbTables['Summary'][0], 'Summary')
        # Test can override default table name/id keys if needed.
        oo = db.OpsimDatabase(
            database=self.database,
            dbTables={'Summary': ['ObsHistory', 'obsHistID']})
        self.assertEqual(oo.dbTables['Summary'][0], 'ObsHistory')
Example #10
    def testOut(self):
        """
        Check that the metric bundle can generate the expected output
        """
        nside = 8
        slicer = slicers.HealpixSlicer(nside=nside)
        metric = metrics.MeanMetric(col='airmass')
        sql = 'filter="r"'
        stacker1 = stackers.RandomDitherFieldPerVisitStacker()
        stacker2 = stackers.GalacticStacker()
        map1 = maps.GalCoordsMap()
        map2 = maps.StellarDensityMap()

        metricB = metricBundles.MetricBundle(metric,
                                             slicer,
                                             sql,
                                             stackerList=[stacker1, stacker2])
        filepath = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests/')

        database = os.path.join(filepath, 'opsimblitz1_1133_sqlite.db')
        opsdb = db.OpsimDatabase(database=database)
        resultsDb = db.ResultsDb(outDir=self.outDir)

        bgroup = metricBundles.MetricBundleGroup({0: metricB},
                                                 opsdb,
                                                 outDir=self.outDir,
                                                 resultsDb=resultsDb)
        bgroup.runAll()
        bgroup.plotAll()
        bgroup.writeAll()

        outThumbs = glob.glob(os.path.join(self.outDir, 'thumb*'))
        outNpz = glob.glob(os.path.join(self.outDir, '*.npz'))
        outPdf = glob.glob(os.path.join(self.outDir, '*.pdf'))

        # By default, make 3 plots for healpix
        assert (len(outThumbs) == 3)
        assert (len(outPdf) == 3)
        assert (len(outNpz) == 1)
Example #11
def run(config_filename):
    # YAML input file.
    config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
    # print(config)
    outDir = 'Test'  # this is for MAF

    # grab the db filename from yaml input file
    dbFile = config['Observations']['filename']

    """
    conn = sqlite3.connect(dbFile)
    cur = conn.cursor()
    table_name='Proposal'
    result = cur.execute("PRAGMA table_info('%s')" % table_name).fetchall()
    print('Results',result)

    cur.execute("SELECT * FROM Proposal")
    rows = cur.fetchall()
    for row in rows:
        print(row)
    print('end')
    cur.execute('PRAGMA TABLE_INFO({})'.format('ObsHistory'))

    names = [tup[1] for tup in cur.fetchall()]
    print(names)
    """
    opsimdb = db.OpsimDatabase(dbFile)
    # version = opsimdb.opsimVersion
    propinfo, proptags = opsimdb.fetchPropInfo()
    print('proptags and propinfo', proptags, propinfo)

    # grab the fieldtype (DD or WFD) from yaml input file
    fieldtype = config['Observations']['fieldtype']
    fake_file = config['Fake_file']
    module = import_module(config['Metric'])

    slicer = slicers.HealpixSlicer(nside=config['Pixelisation']['nside'])

    sqlconstraint = opsimdb.createSQLWhere(fieldtype, proptags)

    bundles = []
    names = []
    lim_sn = {}
    bands = config['Observations']['bands']
    z = config['Observations']['z']
    metric = {}
    # processing, band after band

    Ra_ref = 0.000
    Dec_ref = -2.308039
    time_ref = time.time()
    for band in bands:
        sql_i = sqlconstraint+' AND '
        sql_i += 'filter = "%s"' % (band)
        # sql_i += ' AND abs(fieldRA-(%f))< %f' % (Ra_ref, 1.e-2)+' AND '
        # sql_i += 'abs(fieldDec-(%f))< %f' % (Dec_ref, 1.e-2)

        lim_sn[band] = ReferenceData(
            config['Li file'], config['Mag_to_flux file'], band, z)

        metric[band] = module.SNSNRMetric(lim_sn=lim_sn[band],
                                          names_ref=config['names_ref'],
                                          fake_file=fake_file,
                                          coadd=config['Observations']['coadd'],
                                          z=z,
                                          display=config['Display_Processing'],
                                          season=config['Observations']['season'])
        bundles.append(metricBundles.MetricBundle(metric[band], slicer, sql_i))
        names.append(band)

    bdict = dict(zip(names, bundles))

    resultsDb = db.ResultsDb(outDir='None')
    mbg = metricBundles.MetricBundleGroup(bdict, opsimdb,
                                          outDir=outDir, resultsDb=resultsDb)

    mbg.runAll()

    # Let us display the results

    for band, val in bdict.items():
        metValues = val.metricValues[~val.metricValues.mask]
        res = None
        for vals in metValues:
            if res is None:
                res = vals
            else:
                res = np.concatenate((res, vals))
        res = np.unique(res)

        """
        sn_plot.detecFracPlot(res, config['Pixelisation']
                              ['nside'], config['names_ref'])

        sn_plot.detecFracHist(res, config['names_ref'])
        """
    plt.show()
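
The YAML file consumed by run() must define at least the keys referenced above; an illustrative structure, shown as the equivalent Python dict (all values hypothetical):

config = {
    'Observations': {'filename': 'baseline2018a.db', 'fieldtype': 'WFD',
                     'bands': 'gri', 'z': 0.3, 'coadd': True, 'season': 1},
    'Pixelisation': {'nside': 64},
    'Metric': 'sn_snr_metric',          # module providing SNSNRMetric
    'Fake_file': 'fake_obs.yaml',
    'Li file': 'Li_files.npy',
    'Mag_to_flux file': 'mag_to_flux.npy',
    'names_ref': ['SNCosmo'],
    'Display_Processing': False,
}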
Example #12
def coaddM5Analysis(path,
                    dbfile,
                    runName,
                    slair=False,
                    WFDandDDFs=False,
                    noDithOnly=False,
                    bestDithOnly=False,
                    someDithOnly=False,
                    specifiedDith=None,
                    nside=128,
                    filterBand='r',
                    includeDustExtinction=False,
                    saveunMaskedCoaddData=False,
                    pixelRadiusForMasking=5,
                    cutOffYear=None,
                    plotSkymap=True,
                    plotCartview=True,
                    unmaskedColorMin=None,
                    unmaskedColorMax=None,
                    maskedColorMin=None,
                    maskedColorMax=None,
                    nTicks=5,
                    plotPowerSpectrum=True,
                    showPlots=True,
                    saveFigs=True,
                    almAnalysis=True,
                    raRange=[-50, 50],
                    decRange=[-65, 5],
                    saveMaskedCoaddData=True):
    """

    Analyze the artifacts induced in the coadded 5sigma depth due to imperfect observing strategy.
      - Creates an output directory for subdirectories containing the specified things to save.
      - Creates, shows, and saves comparison plots.
      - Returns the metricBundle object containing the calculated coadded depth, and the output directory name.

    Required Parameters
    -------------------
      * path: str: path to the main directory where output directory is to be saved.
      * dbfile: str: path to the OpSim output file, e.g. to a copy of enigma_1189
      * runName: str: run name tag to identify the output of specified OpSim output, e.g. 'enigma1189' 

    Optional Parameters
    -------------------
      * slair: boolean: set to True if analysis on a SLAIR output.
                        Default: False
      * WFDandDDFs: boolean: set to True to consider both the WFD survey and DDFs. Otherwise will only work
                             with WFD. Default: False
      * noDithOnly: boolean: set to True if only want to consider the undithered survey. Default: False
      * bestDithOnly: boolean: set to True if only want to consider RandomDitherFieldPerVisit.
                               Default: False
      * someDithOnly: boolean: set to True if only want to consider undithered and a few dithered surveys. 
                               Default: False
      * specifiedDith: str: specific dither strategy to run.
                            Default: None
      * nside: int: HEALpix resolution parameter. Default: 128
      * filterBand: str: any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'r'
      * includeDustExtinction: boolean: set to include dust extinction. Default: False
      * saveunMaskedCoaddData: boolean: set to True to save data before border masking. Default: False
      * pixelRadiusForMasking: int: number of pixels to mask along the shallow border. Default: 5

      * cutOffYear: int: year cut to restrict analysis to only a subset of the survey. 
                         Must range from 1 to 9, or None for the full survey analysis (10 yrs).
                         Default: None
      * plotSkymap: boolean: set to True if want to plot skymaps. Default: True
      * plotCartview: boolean: set to True if want to plot cartview plots. Default: True
      * unmaskedColorMin: float: lower limit on the colorscale for unmasked skymaps. Default: None
      * unmaskedColorMax: float: upper limit on the colorscale for unmasked skymaps. Default: None

      * maskedColorMin: float: lower limit on the colorscale for border-masked skymaps. Default: None
      * maskedColorMax: float: upper limit on the colorscale for border-masked skymaps. Default: None
      * nTicks: int: (number of ticks - 1) on the skymap colorbar. Default: 5
      * plotPowerSpectrum: boolean: set to True if want to plot powerspectra. Default: True

      * showPlots: boolean: set to True if want to show figures. Default: True
      * saveFigs: boolean: set to True if want to save figures. Default: True
      
      * almAnalysis: boolean: set to True to perform the alm analysis. Default: True
      * raRange: float array: range of right ascension (in degrees) to consider in alm cartview plot;
                              applicable when almAnalysis=True. Default: [-50,50]
      * decRange: float array: range of declination (in degrees) to consider in alm cartview plot; 
                               applicable when almAnalysis=True. Default: [-65,5]
      * saveMaskedCoaddData: boolean: set to True to save the coadded depth data after the border
                                      masking. Default: True

    """
    # ------------------------------------------------------------------------
    # read in the database
    if slair:
        # slair database
        opsdb = db.Database(dbfile, defaultTable='observations')
    else:
        # OpSim database
        opsdb = db.OpsimDatabase(dbfile)

    # ------------------------------------------------------------------------
    # set up the outDir
    zeropt_tag = ''
    if cutOffYear is not None: zeropt_tag = '%syearCut' % cutOffYear
    else: zeropt_tag = 'fullSurveyPeriod'

    if includeDustExtinction: dust_tag = 'withDustExtinction'
    else: dust_tag = 'noDustExtinction'

    regionType = ''
    if WFDandDDFs: regionType = 'WFDandDDFs_'

    outDir = 'coaddM5Analysis_%snside%s_%s_%spixelRadiusForMasking_%sBand_%s_%s_directory' % (
        regionType, nside, dust_tag, pixelRadiusForMasking, filterBand,
        runName, zeropt_tag)
    print('# outDir: %s' % outDir)
    resultsDb = db.ResultsDb(outDir=outDir)

    # ------------------------------------------------------------------------
    # set up the sql constraint
    if WFDandDDFs:
        if cutOffYear is not None:
            nightCutOff = (cutOffYear) * 365.25
            sqlconstraint = 'night<=%s and filter=="%s"' % (nightCutOff,
                                                            filterBand)
        else:
            sqlconstraint = 'filter=="%s"' % filterBand
    else:
        # set up the propID and units on the ra, dec
        if slair:  # no prop ID; only WFD is simulated.
            wfdWhere = ''
            raDecInDeg = True
        else:
            propIds, propTags = opsdb.fetchPropInfo()
            wfdWhere = '%s and ' % opsdb.createSQLWhere('WFD', propTags)
            raDecInDeg = opsdb.raDecInDeg
        # set up the year cutoff
        if cutOffYear is not None:
            nightCutOff = (cutOffYear) * 365.25
            sqlconstraint = '%snight<=%s and filter=="%s"' % (
                wfdWhere, nightCutOff, filterBand)
        else:
            sqlconstraint = '%sfilter=="%s"' % (wfdWhere, filterBand)
    print('# sqlconstraint: %s' % sqlconstraint)

    # ------------------------------------------------------------------------
    # setup all the slicers
    slicer = {}
    stackerList = {}

    if specifiedDith is not None:  # would like to add all the stackers first and then keep only the one that is specified
        bestDithOnly, noDithOnly = False, False

    if bestDithOnly:
        stackerList['RandomDitherFieldPerVisit'] = [
            mafStackers.RandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
                                                         randomSeed=1000)
        ]
        slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(
            lonCol='randomDitherFieldPerVisitRa',
            latCol='randomDitherFieldPerVisitDec',
            latLonDeg=raDecInDeg,
            nside=nside,
            useCache=False)
    else:
        if slair:
            slicer['NoDither'] = slicers.HealpixSlicer(lonCol='RA',
                                                       latCol='dec',
                                                       latLonDeg=raDecInDeg,
                                                       nside=nside,
                                                       useCache=False)
        else:
            slicer['NoDither'] = slicers.HealpixSlicer(lonCol='fieldRA',
                                                       latCol='fieldDec',
                                                       latLonDeg=raDecInDeg,
                                                       nside=nside,
                                                       useCache=False)
        if someDithOnly and not noDithOnly:
            #stackerList['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
            #                                                                                                          randomSeed=1000)]
            #slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa',
            #                                                                    latCol='repulsiveRandomDitherFieldPerVisitDec',
            #                                                                    latLonDeg=raDecInDeg, nside=nside,
            #                                                                    useCache=False)
            slicer['SequentialHexDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='hexDitherFieldPerNightRa',
                latCol='hexDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['PentagonDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='pentagonDitherPerSeasonRa',
                latCol='pentagonDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
        elif not noDithOnly:
            # random dithers on different timescales
            stackerList['RandomDitherPerNight'] = [
                mafStackers.RandomDitherPerNightStacker(degrees=raDecInDeg,
                                                        randomSeed=1000)
            ]
            stackerList['RandomDitherFieldPerNight'] = [
                mafStackers.RandomDitherFieldPerNightStacker(
                    degrees=raDecInDeg, randomSeed=1000)
            ]
            stackerList['RandomDitherFieldPerVisit'] = [
                mafStackers.RandomDitherFieldPerVisitStacker(
                    degrees=raDecInDeg, randomSeed=1000)
            ]

            # rep random dithers on different timescales
            #stackerList['RepulsiveRandomDitherPerNight'] = [myStackers.RepulsiveRandomDitherPerNightStacker(degrees=raDecInDeg,
            #                                                                                                randomSeed=1000)]
            #stackerList['RepulsiveRandomDitherFieldPerNight'] = [myStackers.RepulsiveRandomDitherFieldPerNightStacker(degrees=raDecInDeg,
            #                                                                                                          randomSeed=1000)]
            #stackerList['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=raDecInDeg,
            #                                                                                                          randomSeed=1000)]
            # set up slicers for different dithers
            # random dithers on different timescales
            slicer['RandomDitherPerNight'] = slicers.HealpixSlicer(
                lonCol='randomDitherPerNightRa',
                latCol='randomDitherPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['RandomDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='randomDitherFieldPerNightRa',
                latCol='randomDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['RandomDitherFieldPerVisit'] = slicers.HealpixSlicer(
                lonCol='randomDitherFieldPerVisitRa',
                latCol='randomDitherFieldPerVisitDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            # rep random dithers on different timescales
            #slicer['RepulsiveRandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherPerNightRa',
            #                                                               latCol='repulsiveRandomDitherPerNightDec',
            #                                                               latLonDeg=raDecInDeg, nside=nside, useCache=False)
            #slicer['RepulsiveRandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerNightRa',
            #                                                                    latCol='repulsiveRandomDitherFieldPerNightDec',
            #                                                                    latLonDeg=raDecInDeg, nside=nside,
            #                                                                    useCache=False)
            #slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa',
            #                                                                    latCol='repulsiveRandomDitherFieldPerVisitDec',
            #                                                                    latLonDeg=raDecInDeg, nside=nside,
            #                                                                    useCache=False)
            # spiral dithers on different timescales
            slicer['FermatSpiralDitherPerNight'] = slicers.HealpixSlicer(
                lonCol='fermatSpiralDitherPerNightRa',
                latCol='fermatSpiralDitherPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['FermatSpiralDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='fermatSpiralDitherFieldPerNightRa',
                latCol='fermatSpiralDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['FermatSpiralDitherFieldPerVisit'] = slicers.HealpixSlicer(
                lonCol='fermatSpiralDitherFieldPerVisitRa',
                latCol='fermatSpiralDitherFieldPerVisitDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            # hex dithers on different timescales
            slicer['SequentialHexDitherPerNight'] = slicers.HealpixSlicer(
                lonCol='hexDitherPerNightRa',
                latCol='hexDitherPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['SequentialHexDitherFieldPerNight'] = slicers.HealpixSlicer(
                lonCol='hexDitherFieldPerNightRa',
                latCol='hexDitherFieldPerNightDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['SequentialHexDitherFieldPerVisit'] = slicers.HealpixSlicer(
                lonCol='hexDitherFieldPerVisitRa',
                latCol='hexDitherFieldPerVisitDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            # per season dithers
            slicer['PentagonDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='pentagonDitherPerSeasonRa',
                latCol='pentagonDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['PentagonDiamondDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='pentagonDiamondDitherPerSeasonRa',
                latCol='pentagonDiamondDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
            slicer['SpiralDitherPerSeason'] = slicers.HealpixSlicer(
                lonCol='spiralDitherPerSeasonRa',
                latCol='spiralDitherPerSeasonDec',
                latLonDeg=raDecInDeg,
                nside=nside,
                useCache=False)
    if specifiedDith is not None:
        stackerList_, slicer_ = {}, {}
        if specifiedDith in slicer.keys():
            # only Random dithers have a stacker object for random seed specification
            if 'Random' in specifiedDith:
                stackerList_[specifiedDith] = stackerList[specifiedDith]
            slicer_[specifiedDith] = slicer[specifiedDith]
        else:
            raise ValueError(
                'Invalid value for specifiedDith: %s. Allowed values include one of the following:\n%s'
                % (specifiedDith, slicer.keys()))
        stackerList, slicer = stackerList_, slicer_

    # ------------------------------------------------------------------------
    if slair:
        m5Col = 'fivesigmadepth'
    else:
        m5Col = 'fiveSigmaDepth'
    # set up the metric
    if includeDustExtinction:
        # include dust extinction when calculating the co-added depth
        coaddMetric = metrics.ExgalM5(m5Col=m5Col, lsstFilter=filterBand)
    else:
        coaddMetric = metrics.Coaddm5Metric(m5Col=m5Col)
    dustMap = maps.DustMap(
        interp=False, nside=nside
    )  # include dustMap; actual in/exclusion of dust is handled by the metric

    # ------------------------------------------------------------------------
    # set up the bundle
    coaddBundle = {}
    for dither in slicer:
        if dither in stackerList:
            coaddBundle[dither] = metricBundles.MetricBundle(
                coaddMetric,
                slicer[dither],
                sqlconstraint,
                stackerList=stackerList[dither],
                runName=runName,
                metadata=dither,
                mapsList=[dustMap])
        else:
            coaddBundle[dither] = metricBundles.MetricBundle(
                coaddMetric,
                slicer[dither],
                sqlconstraint,
                runName=runName,
                metadata=dither,
                mapsList=[dustMap])

    # ------------------------------------------------------------------------
    # run the analysis
    if includeDustExtinction:
        print('\n# Running coaddBundle with dust extinction ...')
    else:
        print('\n# Running coaddBundle without dust extinction ...')
    cGroup = metricBundles.MetricBundleGroup(coaddBundle,
                                             opsdb,
                                             outDir=outDir,
                                             resultsDb=resultsDb,
                                             saveEarly=False)
    cGroup.runAll()

    # ------------------------------------------------------------------------
    # plot and save the data
    plotBundleMaps(path,
                   outDir,
                   coaddBundle,
                   dataLabel='$%s$-band Coadded Depth' % filterBand,
                   filterBand=filterBand,
                   dataName='%s-band Coadded Depth' % filterBand,
                   skymap=plotSkymap,
                   powerSpectrum=plotPowerSpectrum,
                   cartview=plotCartview,
                   colorMin=unmaskedColorMin,
                   colorMax=unmaskedColorMax,
                   nTicks=nTicks,
                   showPlots=showPlots,
                   saveFigs=saveFigs,
                   outDirNameForSavedFigs='coaddM5Plots_unmaskedBorders')
    print('\n# Done saving plots without border masking.\n')

    # ------------------------------------------------------------------------
    plotHandler = plots.PlotHandler(outDir=outDir,
                                    resultsDb=resultsDb,
                                    thumbnail=False,
                                    savefig=False)

    print(
        '# Number of pixels in the survey region (before masking the border):')
    for dither in coaddBundle:
        print(
            '  %s: %s' %
            (dither,
             len(np.where(coaddBundle[dither].metricValues.mask == False)[0])))

    # ------------------------------------------------------------------------
    # save the unmasked data?
    if saveunMaskedCoaddData:
        outDir_new = 'unmaskedCoaddData'
        if not os.path.exists('%s%s/%s' % (path, outDir, outDir_new)):
            os.makedirs('%s%s/%s' % (path, outDir, outDir_new))
        saveBundleData_npzFormat('%s%s/%s' % (path, outDir, outDir_new),
                                 coaddBundle, 'coaddM5Data_unmasked',
                                 filterBand)

    # ------------------------------------------------------------------------
    # mask the edges
    print('\n# Masking the edges for coadd ...')
    coaddBundle = maskingAlgorithmGeneralized(
        coaddBundle,
        plotHandler,
        dataLabel='$%s$-band Coadded Depth' % filterBand,
        nside=nside,
        pixelRadius=pixelRadiusForMasking,
        plotIntermediatePlots=False,
        plotFinalPlots=False,
        printFinalInfo=True)
    if (pixelRadiusForMasking > 0):
        # plot and save the masked data
        plotBundleMaps(path,
                       outDir,
                       coaddBundle,
                       dataLabel='$%s$-band Coadded Depth' % filterBand,
                       filterBand=filterBand,
                       dataName='%s-band Coadded Depth' % filterBand,
                       skymap=plotSkymap,
                       powerSpectrum=plotPowerSpectrum,
                       cartview=plotCartview,
                       colorMin=maskedColorMin,
                       colorMax=maskedColorMax,
                       nTicks=nTicks,
                       showPlots=showPlots,
                       saveFigs=saveFigs,
                       outDirNameForSavedFigs='coaddM5Plots_maskedBorders')
        print('\n# Done saving plots with border masking. \n')

    # ------------------------------------------------------------------------
    # Calculate total power
    summarymetric = metrics.TotalPowerMetric()
    for dither in coaddBundle:
        coaddBundle[dither].setSummaryMetrics(summarymetric)
        coaddBundle[dither].computeSummaryStats()
        print('# Total power for %s case is %f.' %
              (dither, coaddBundle[dither].summaryValues['TotalPower']))
    print('')

    # ------------------------------------------------------------------------
    # run the alm analysis
    if almAnalysis:
        almPlots(path,
                 outDir,
                 copy.deepcopy(coaddBundle),
                 nside=nside,
                 filterband=filterBand,
                 raRange=raRange,
                 decRange=decRange,
                 showPlots=showPlots)
    # ------------------------------------------------------------------------
    # save the masked data?
    if saveMaskedCoaddData and (pixelRadiusForMasking > 0):
        outDir_new = 'maskedCoaddData'
        if not os.path.exists('%s%s/%s' % (path, outDir, outDir_new)):
            os.makedirs('%s%s/%s' % (path, outDir, outDir_new))
        saveBundleData_npzFormat('%s%s/%s' % (path, outDir, outDir_new),
                                 coaddBundle, 'coaddM5Data_masked', filterBand)

    # ------------------------------------------------------------------------
    # plot comparison plots
    if len(coaddBundle.keys()) > 1:  # more than one key
        # set up the directory
        outDir_comp = 'coaddM5ComparisonPlots'
        if not os.path.exists('%s%s/%s' % (path, outDir, outDir_comp)):
            os.makedirs('%s%s/%s' % (path, outDir, outDir_comp))
        # ------------------------------------------------------------------------
        # plot for the power spectra
        cl = {}
        for dither in plotColor:
            if dither in coaddBundle:
                cl[dither] = hp.anafast(hp.remove_dipole(
                    coaddBundle[dither].metricValues.filled(
                        coaddBundle[dither].slicer.badval)),
                                        lmax=500)
                ell = np.arange(np.size(cl[dither]))
                plt.plot(ell, (cl[dither] * ell * (ell + 1)) / 2.0 / np.pi,
                         color=plotColor[dither],
                         linestyle='-',
                         label=dither)
        plt.xlabel(r'$\ell$')
        plt.ylabel(r'$\ell(\ell+1)C_\ell/(2\pi)$')
        plt.xlim(0, 500)
        fig = plt.gcf()
        fig.set_size_inches(12.5, 10.5)
        leg = plt.legend(labelspacing=0.001)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(4.0)
        filename = 'powerspectrum_comparison_all.png'
        plt.savefig('%s%s/%s/%s' % (path, outDir, outDir_comp, filename),
                    bbox_inches='tight',
                    format='png')
        plt.show()

        # create the histogram
        scale = hp.nside2pixarea(nside, degrees=True)

        def tickFormatter(y, pos):
            return '%d' % (y * scale)  # convert pixel count to area

        binsize = 0.01
        for dither in plotColor:
            if dither in coaddBundle:
                ind = np.where(
                    coaddBundle[dither].metricValues.mask == False)[0]
                binAll = int(
                    (max(coaddBundle[dither].metricValues.data[ind]) -
                     min(coaddBundle[dither].metricValues.data[ind])) /
                    binsize)
                plt.hist(coaddBundle[dither].metricValues.data[ind],
                         bins=binAll,
                         label=dither,
                         histtype='step',
                         color=plotColor[dither])
        ax = plt.gca()
        ymin, ymax = ax.get_ylim()
        nYticks = 10.
        wantedYMax = ymax * scale
        wantedYMax = 10. * np.ceil(float(wantedYMax) / 10.)
        increment = 5. * np.ceil(float(wantedYMax / nYticks) / 5.)
        wantedArray = np.arange(0, wantedYMax, increment)
        ax.yaxis.set_ticks(wantedArray / scale)
        ax.yaxis.set_major_formatter(FuncFormatter(tickFormatter))
        plt.xlabel('$%s$-band Coadded Depth' % filterBand)
        plt.ylabel('Area (deg$^2$)')
        fig = plt.gcf()
        fig.set_size_inches(12.5, 10.5)
        leg = plt.legend(labelspacing=0.001, loc=2)
        for legobj in leg.legendHandles:
            legobj.set_linewidth(2.0)
        filename = 'histogram_comparison.png'
        plt.savefig('%s%s/%s/%s' % (path, outDir, outDir_comp, filename),
                    bbox_inches='tight',
                    format='png')
        plt.show()
        # ------------------------------------------------------------------------
        # plot power spectra for the separte panel
        totKeys = len(list(coaddBundle.keys()))
        if (totKeys > 1):
            plt.clf()
            nCols = 2
            nRows = int(np.ceil(float(totKeys) / nCols))
            fig, ax = plt.subplots(nRows, nCols)
            plotRow = 0
            plotCol = 0
            for dither in list(plotColor.keys()):
                if dither in list(coaddBundle.keys()):
                    ell = np.arange(np.size(cl[dither]))
                    ax[plotRow, plotCol].plot(ell, (cl[dither] * ell *
                                                    (ell + 1)) / 2.0 / np.pi,
                                              color=plotColor[dither],
                                              label=dither)
                    if (plotRow == nRows - 1):
                        ax[plotRow, plotCol].set_xlabel(r'$\ell$')
                    ax[plotRow,
                       plotCol].set_ylabel(r'$\ell(\ell+1)C_\ell/(2\pi)$')
                    ax[plotRow,
                       plotCol].yaxis.set_major_locator(MaxNLocator(3))
                    if (dither != 'NoDither'):
                        ax[plotRow, plotCol].set_ylim(0, 0.0035)
                    ax[plotRow, plotCol].set_xlim(0, 500)
                    plotRow += 1
                    if (plotRow > nRows - 1):
                        plotRow = 0
                        plotCol += 1
            fig.set_size_inches(20, int(nRows * 30 / 7.))
            filename = 'powerspectrum_sepPanels.png'
            plt.savefig('%s%s/%s/%s' % (path, outDir, outDir_comp, filename),
                        bbox_inches='tight',
                        format='png')
            plt.show()
    return coaddBundle, outDir
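
A hypothetical invocation, analyzing only the undithered survey:

coaddBundle, outDir = coaddM5Analysis(path='./', dbfile='enigma_1189_sqlite.db',
                                      runName='enigma1189', filterBand='r',
                                      noDithOnly=True, almAnalysis=False)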
Example #13
        obsFiles = args.obsFile
    else:
        obsFiles = [args.obsFile]
    print(f'Will loop through {len(obsFiles)} observation files')
    # Set up resultsDb.
    if not (os.path.isdir(args.outDir)):
        os.makedirs(args.outDir)
    resultsDb = db.ResultsDb(outDir=args.outDir)

    Hrange = np.arange(args.hMin, args.hMax + args.hStep, args.hStep)
    if args.hMark is None:
        hIdx = int(len(Hrange) / 2)
        args.hMark = Hrange[hIdx]

    if args.opsimDb is not None:
        opsdb = db.OpsimDatabase(args.opsimDb)
        colmap = batches.getColMap(opsdb)
        opsdb.close()
    else:
        # Use the default (currently, v4).
        colmap = batches.ColMapDict()

    # Loop through calculation of metrics
    tempRoot = 'quick_subset'
    for i, obsFile in enumerate(obsFiles):
        slicer = batches.setupMoSlicer(args.orbitFile, Hrange, obsFile=obsFile)
        # Run discovery metrics using 'trailing' losses
        bdictD, pbundleD = batches.quickDiscoveryBatch(
            slicer,
            colmap=colmap,
            runName=args.opsimRun,
Example #14
                      pad=0.1,
                      orientation='horizontal',
                      format=None,
                      extendrect=True)
    cb.set_label(unit, fontsize=None)
    tick_locator = ticker.MaxNLocator(nbins=nbins)
    cb.locator = tick_locator
    cb.update_ticks()
    cb.solids.set_edgecolor("face")
    return cb


nside = 128
# Connect to an opsim database, bleeding-edge sims here:
# https://lsst-web.ncsa.illinois.edu/sim-data/sims_featureScheduler_runs/
opsdb = db.OpsimDatabase('baseline_1exp_pairsmix_10yrs.db')
outDir = 'maf_out'
resultsDb = db.ResultsDb(outDir=outDir)
plotFuncs = [plots.TwoDMap()]

filters = ['u', 'g', 'r', 'i', 'z', 'y']
day_max = np.round(365.25 * 2)
bins = np.arange(day_max)

nval = 3

for filtername in filters:
    metric = metrics.AccumulateCountMetric(bins=bins)
    slicer = slicers.HealpixSlicer(nside=nside)
    plotDict = {'xlabel': 'Night (days)', 'cbarTitle': 'N obs', 'colorMax': 75}
    # only use i-band
Example #15
def save_csv_dithers(dbs_path,
                     outDir,
                     db_files_only=None,
                     rot_rand_seed=42,
                     trans_rand_seed=42,
                     print_progress=True,
                     show_diagnostic_plots=False,
                     save_plots=False):
    """
    
    The goal here is to calculate the translational and rotational dithers for
    various cadences and save the output as a csv file.  These dithers are largely
    the same as in DC1/DC2: 
        - Translational dithers:
            - WFD: large random offsets (as large as 1.75 deg) applied after every visit.
            - DD: small random offsets (as large as 7 arcmin) applied after every visit.
            - Else: no dithers, so `fieldRA`, `fieldDec` are returned.
        - Rotational dithers:
            - All surveys (WFD, DD, else): random between -90 and 90 degrees, applied after
                                           every filter change. (Break from DC2: some visits
                                           don't get dithered, since they would be forced
                                           outside the rotator range.
                                           See RotStacker info for details.)
             
    Supports OpSim V3/V4 outputs.
    
    Required Inputs
    ---------------
    * dbs_path: str: path to the directory that contains the .db files; could have non-.db files.
    * outDir: str: path to the directory where the output should be saved.
    
    Optional Inputs
    ---------------
    * db_files_only: list of str: list of names of the db files to run.
                                  Default: None. Runs over all the files in db_path.
    * rot_rand_seed: int: seed for random number generator for rotational dithers.
                          Default: 42
    * trans_rand_seed: int: seed for random number generator for translational dithers.
                            Default: 42
    * print_progress: bool: set to False to not print progress.
                            Default: True
    * show_diagnostic_plots: bool: set to True to show histogram of added dithers.
                                   Default: False
    * save_plots: bool: set to True to save the histogram for descDithers in outDir.
                        Default: False
                                   
    Saved file format
    -----------------
    .csv file with four columns:
        obsIDcol, 'descDitheredRA', 'descDitheredDec', 'descDitheredRotTelPos'
    where
        obsIDcol = 'observationId' for V4 outputs and 'obsHistID' for V3 outputs.
    
    Saved filename = descDithers_<database name>.csv
    
    """
    startTime_0 = time.time()
    readme = '##############################\n%s' % (datetime.date.isoformat(
        datetime.date.today()))
    readme += '\nRunning with lsst.sims.maf.__version__: %s' % lsst.sims.maf.__version__
    readme += '\n\nsave_csv_dithers run:\ndbs_path= %s\n' % dbs_path
    readme += 'outDir: %s\n' % outDir
    readme += 'db_files_only: %s\n' % db_files_only
    readme += 'rot_rand_seed=%s\ntrans_rand_seed=%s\n' % (rot_rand_seed,
                                                          trans_rand_seed)
    readme += 'print_progress=%s\nshow_diagnostic_plots=%s\n' % (
        print_progress, show_diagnostic_plots)

    dbfiles = [f for f in os.listdir(dbs_path)
               if f.endswith('db')]  # select db files
    if print_progress: print('Found files: %s\n' % dbfiles)

    if db_files_only is not None:
        dbfiles = [f for f in dbfiles if f in db_files_only]  # select db files

    readme += '\nReading for files: %s\n\n' % dbfiles
    if print_progress and db_files_only is not None:
        print('Running over: %s\n' % dbfiles)

    for i, dbfile in enumerate(dbfiles):  # loop over all the db files
        startTime = time.time()
        if (i != 0): readme = ''
        readme += '%s' % dbfile

        if print_progress: print('Starting: %s\n' % dbfile)

        opsdb = db.OpsimDatabase('%s/%s' %
                                 (dbs_path, dbfile))  # connect to the database

        # specify the column names to get from the db file
        colnames = [
            'proposalId', 'observationId', 'fieldRA', 'fieldDec', 'rotTelPos'
        ]
        propIDcol, obsIDcol = 'proposalId', 'observationId'

        if (opsdb.opsimVersion == 'V3'):
            # V3 outputs have somewhat different column names
            colnames = [
                'propID', 'obsHistID', 'fieldRA', 'fieldDec', 'rotTelPos'
            ]
            propIDcol, obsIDcol = 'propID', 'obsHistID'

        # get the data
        simdata = opsdb.fetchMetricData(colnames=colnames, sqlconstraint=None)

        # set up to run the stackers that add columns for translational and rotational dithers.
        metric = metrics.PassMetric()  # just want to access the database; no analysis needed
        slicer = slicers.OneDSlicer(
            sliceColName='night', binsize=1,
            verbose=print_progress)  # essentially accessing all nights
        sqlconstraint = None

        resultsDb = db.ResultsDb(outDir=outDir)
        ################################################################################################
        # set up metric bundle to run stackers for large translational dithers + rotational dithers
        if print_progress:
            print('Setting up for WFD translational dithers + rot dithers.')
        bgroup = {}
        stackerList = [
            stackers.RandomDitherFieldPerVisitStacker(
                degrees=opsdb.raDecInDeg, randomSeed=trans_rand_seed),
            stackers.RandomRotDitherPerFilterChangeStacker(
                degrees=opsdb.raDecInDeg, randomSeed=rot_rand_seed)
        ]

        bundle = metricBundles.MetricBundle(metric,
                                            slicer,
                                            sqlconstraint=sqlconstraint,
                                            stackerList=stackerList)
        bgroup['WFD'] = metricBundles.MetricBundleGroup({0: bundle},
                                                        opsdb,
                                                        outDir=outDir,
                                                        resultsDb=resultsDb,
                                                        saveEarly=False,
                                                        verbose=print_progress)
        # run the bundle
        bgroup['WFD'].runAll()

        # set up the bundle for small translational dithers
        if print_progress: print('\nSetting up for DD translational dithers.')
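        # chip size: the 3.5 deg field of view (2 x 1.75 deg) spans ~15 chips,
        # i.e. ~0.23 deg per chip; the max dither of half a chip is ~7 arcmin,
        # matching the DD dither amplitude quoted in the docstring.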
        chipSize = 1.75 * 2 / 15
        chipMaxDither = chipSize / 2.
        stackerList = [
            stackers.RandomDitherFieldPerVisitStacker(
                maxDither=chipMaxDither,
                degrees=opsdb.raDecInDeg,
                randomSeed=trans_rand_seed)
        ]
        bundle = metricBundles.MetricBundle(metric,
                                            slicer,
                                            sqlconstraint=sqlconstraint,
                                            stackerList=stackerList)

        bgroup['DD'] = metricBundles.MetricBundleGroup({0: bundle},
                                                       opsdb,
                                                       outDir=outDir,
                                                       resultsDb=resultsDb,
                                                       saveEarly=False,
                                                       verbose=print_progress)
        # run the bundle
        bgroup['DD'].runAll()

        ################################################################################################
        # access the relevant columns
        dithered_RA, dithered_Dec = {}, {}
        for key in bgroup:
            dithered_RA[key] = bgroup[key].simData[
                'randomDitherFieldPerVisitRa']
            dithered_Dec[key] = bgroup[key].simData[
                'randomDitherFieldPerVisitDec']

        dithered_rotTelPos = bgroup['WFD'].simData[
            'randomDitherPerFilterChangeRotTelPos']

        ################################################################################################
        # diagnostic plots
        if show_diagnostic_plots:
            # histograms of dithers
            fig, axes = plt.subplots(nrows=1, ncols=3)

            for key in bgroup:
                # ra
                axes[0].hist(dithered_RA[key] - simdata['fieldRA'],
                             label='%s dithers: delRA' % key,
                             histtype='step',
                             lw=2,
                             bins=30)

                # dec
                axes[1].hist(dithered_Dec[key] - simdata['fieldDec'],
                             label='%s dithers: delDec' % key,
                             histtype='step',
                             lw=2)

            # tel pos
            axes[2].hist(dithered_rotTelPos - simdata['rotTelPos'],
                         label='rot dithers: rotTelPos',
                         histtype='step',
                         lw=2)
            for ax in axes:
                ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
                ax.set_ylabel('Counts')

            axes[0].legend()
            axes[1].legend()

            if opsdb.raDecInDeg: unitlabel = 'degrees'
            else: unitlabel = 'radians'

            axes[0].set_xlabel('delRA (%s)' % unitlabel)
            axes[1].set_xlabel('delDec (%s)' % unitlabel)
            axes[2].set_xlabel('delRotTelPos (%s)' % unitlabel)

            plt.title(dbfile)
            fig.set_size_inches(20, 5)

        ################################################################################################
        # initialize the final arrays as the undithered fieldRA, fieldDec, since
        # non-WFD, non-DDF visits should remain unchanged
        descDitheredRA = simdata['fieldRA'].copy()
        descDitheredDec = simdata['fieldDec'].copy()
        descDitheredRot = simdata['rotTelPos'].copy()

        # need to find the indices for WFD vs. DD observations since we are adding different
        # translational dithers for WFD/DDF visits + none for other surveys
        propIds, propTags = opsdb.fetchPropInfo()
        # ok work with WFD visits now
        ind_WFD = np.where(simdata[propIDcol] == propTags['WFD'])[0]
        if print_progress:
            tot = len(simdata)
            print('Total visits: ', tot)
            print('propTags: ', propTags)
            print('%s WFD visits out of total %s' % (len(ind_WFD), tot))

        descDitheredRA[ind_WFD] = dithered_RA['WFD'][ind_WFD]
        descDitheredDec[ind_WFD] = dithered_Dec['WFD'][ind_WFD]

        # work with DD visits now
        ind_DD = np.where(simdata[propIDcol] == propTags['DD'])[0]
        if print_progress:
            print('%s DD visits out of total %s' % (len(ind_DD), tot))

        descDitheredRA[ind_DD] = dithered_RA['DD'][ind_DD]
        descDitheredDec[ind_DD] = dithered_Dec['DD'][ind_DD]

        # add rotational dithers to everything
        descDitheredRot = dithered_rotTelPos

        ###############################################################
        # diagnostic plots
        if show_diagnostic_plots or save_plots:
            # histograms of desc dithered positions
            fig, axes = plt.subplots(nrows=1, ncols=3)

            _, bins, _ = axes[0].hist(descDitheredRA,
                                      label='descDitheredRA',
                                      histtype='step',
                                      lw=2)
            axes[0].hist(simdata['fieldRA'],
                         label='fieldRA',
                         histtype='step',
                         lw=2,
                         bins=bins)

            _, bins, _ = axes[1].hist(descDitheredDec,
                                      label='descDitheredDec',
                                      histtype='step',
                                      lw=2)
            axes[1].hist(simdata['fieldDec'],
                         label='fieldDec',
                         histtype='step',
                         lw=2,
                         bins=bins)

            _, bins, _ = axes[2].hist(descDitheredRot,
                                      label='descDitheredRot',
                                      histtype='step',
                                      lw=2)
            axes[2].hist(simdata['rotTelPos'],
                         label='rotTelPos',
                         histtype='step',
                         lw=2,
                         bins=bins)

            xlabel = 'degrees' if opsdb.raDecInDeg else 'radians'

            for ax in axes:
                ax.legend()
                ax.set_xlabel(xlabel)
                ax.set_ylabel('Counts')

            plt.suptitle(dbfile)
            fig.set_size_inches(20, 5)

            if save_plots:
                filename = 'hist_descDithers_%s.png' % (dbfile.split('.db')[0])
                plt.savefig('%s/%s' % (outDir, filename),
                            format='png',
                            bbox_inches='tight')
                readme += '\nSaved hist for descDithers in %s.' % filename

                if print_progress:
                    print('\nSaved hist plot in %s' % filename)

            if show_diagnostic_plots:
                plt.show()
            else:
                plt.close('all')

        ###############################################################
        # save the columns as a csv file.
        d = {
            obsIDcol: simdata[obsIDcol],
            'descDitheredRA': descDitheredRA,
            'descDitheredDec': descDitheredDec,
            'descDitheredRotTelPos': descDitheredRot
        }

        filename = 'descDithers_%s.csv' % (dbfile.split('.db')[0])
        pd.DataFrame(d).to_csv('%s/%s' % (outDir, filename), index=False)

        readme += '\nSaved the dithers in %s' % filename
        readme += '\nTime taken: %.2f (min)\n\n' % (
            (time.time() - startTime) / 60.)

        if print_progress:
            print('\nSaved the dithers in %s' % filename)
            print('Time taken: %.2f (min)\n\n' %
                  ((time.time() - startTime) / 60.))

        with open('%s/readme.txt' % outDir, 'a') as readme_file:
            readme_file.write(readme)

    # mark the end in the readme.
    with open('%s/readme.txt' % outDir, 'a') as readme_file:
        readme_file.write('All done. Total time taken: %.2f (min)\n\n' %
                          ((time.time() - startTime_0) / 60.))
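
# ----------------------------------------------------------------------
# A minimal sketch (not part of the original script) of reading the saved
# dither CSV back in for downstream use. The filename and the
# 'observationId' index column are hypothetical placeholders for whatever
# dbfile / obsIDcol were set to above.
import pandas as pd

dithers = pd.read_csv('descDithers_example.csv')  # hypothetical filename
dithers = dithers.set_index('observationId')      # assumed value of obsIDcol
# columns written above: descDitheredRA, descDitheredDec, descDitheredRotTelPos
pointing = dithers.iloc[0][['descDitheredRA', 'descDitheredDec',
                            'descDitheredRotTelPos']]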
Example #16
import matplotlib.pyplot as plt
import lsst.sims.maf.db as db
import lsst.sims.maf.utils as utils
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.sliceMetrics as sliceMetrics

oo = db.OpsimDatabase('sqlite:///../opsimblitz1_1131_sqlite.db')

cols = ['fieldID', 'fieldRA', 'fieldDec']
simdata = oo.fetchMetricData(cols, '')
fielddata = oo.fetchFieldsFromFieldTable()

# Add dither column
randomdither = utils.RandomDither(maxDither=1.8, randomSeed=42)
simdata = randomdither.run(simdata)

# Add columns showing the actual dither values
# Note that because RA is wrapped around 360, there will be large values of 'radith' near this point
basestacker = utils.BaseStacker()
basestacker.colsAdded = ['radith', 'decdith']
simdata = basestacker._addStackers(simdata)
simdata['radith'] = simdata['randomRADither'] - simdata['fieldRA']
simdata['decdith'] = simdata['randomDecDither'] - simdata['fieldDec']

metriclist = []
metriclist.append(metrics.MeanMetric('radith'))
metriclist.append(metrics.MeanMetric('decdith'))
metriclist.append(metrics.RmsMetric('radith'))
metriclist.append(metrics.RmsMetric('decdith'))
metriclist.append(metrics.FullRangeMetric('radith'))
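
# A minimal sketch (assuming the standard MAF metric API, in which a metric's
# .run() accepts a numpy recarray of visits) of evaluating the metrics built
# above over the full simdata array, without going through a slicer:
for m in metriclist:
    print(m.name, m.run(simdata))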
import yaml
import numpy as np
from importlib import import_module
import matplotlib.pyplot as plt
import lsst.sims.maf.db as db
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as metricBundles
# sn_plot (supernova plotting helpers) is assumed to be importable from the
# surrounding package.


def run(config_filename):
    # YAML input file.
    config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
    # print(config)
    outDir = 'Test'  # this is for MAF

    # grab the db filename from yaml input file
    dbFile = config['Observations']['filename']
    """
    conn = sqlite3.connect(dbFile)
    cur = conn.cursor()
    table_name='Proposal'
    result = cur.execute("PRAGMA table_info('%s')" % table_name).fetchall()
    print('Results',result)

    cur.execute("SELECT * FROM Proposal")
    rows = cur.fetchall()
    for row in rows:
        print(row)
    print('end')
    cur.execute('PRAGMA TABLE_INFO({})'.format('ObsHistory'))

    names = [tup[1] for tup in cur.fetchall()]
    print(names)
    """
    opsimdb = db.OpsimDatabase(dbFile)
    version = opsimdb.opsimVersion
    propinfo, proptags = opsimdb.fetchPropInfo()
    print('proptags and propinfo', proptags, propinfo)

    # grab the fieldtype (DD or WFD) from yaml input file
    fieldtype = config['Observations']['fieldtype']

    module = import_module(config['Metric'])

    slicer = slicers.HealpixSlicer(nside=config['Pixelisation']['nside'])

    sqlconstraint = opsimdb.createSQLWhere(fieldtype, proptags)

    bundles = []
    names = []
    SNR = dict(
        zip(config['Observations']['bands'], config['Observations']['SNR']))
    mag_range = config['Observations']['mag_range']
    dt_range = config['Observations']['dt_range']
    for band in SNR.keys():
        sql_i = sqlconstraint + ' AND '
        sql_i += 'filter = "%s"' % (band)
        # sql_i += ' AND '
        # sql_i +=  'season= "%s"' % (season)
        metric = module.SNCadenceMetric(config=config,
                                        coadd=config['Observations']['coadd'])
        bundles.append(metricBundles.MetricBundle(metric, slicer, sql_i))
        names.append(band)

        print('sql', sql_i)

    bdict = dict(zip(names, bundles))

    resultsDb = db.ResultsDb(outDir=outDir)
    mbg = metricBundles.MetricBundleGroup(bdict,
                                          opsimdb,
                                          outDir=outDir,
                                          resultsDb=resultsDb)

    result = mbg.runAll()

    # Let us display the results

    for band, val in bdict.items():
        # metricValues is a masked array; keep only the unmasked entries
        metValues = val.metricValues[~val.metricValues.mask]
        res = None
        for vval in metValues:
            if res is None:
                res = vval
            else:
                res = np.concatenate((res, vval))
        res = np.unique(res)
        sn_plot.plotCadence(band,
                            config['Li file'],
                            config['Mag_to_flux file'],
                            SNR[band],
                            res,
                            config['names_ref'],
                            mag_range=mag_range,
                            dt_range=dt_range)

    # mbg.writeAll()
    # mbg.plotAll(closefigs=False)
    # mbg.plot()
    plt.show()
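
# For reference, a sketch of the YAML structure that run() above assumes,
# reconstructed only from the keys the function reads; every value below is a
# hypothetical placeholder, not taken from any original configuration file.
example_config = {
    'Metric': 'some_sn_metric_module',       # module providing SNCadenceMetric
    'Pixelisation': {'nside': 64},
    'Li file': 'Li_reference.npy',           # hypothetical
    'Mag_to_flux file': 'Mag_to_flux.npy',   # hypothetical
    'names_ref': ['SNCosmo'],
    'Observations': {
        'filename': 'baseline2018a.db',
        'fieldtype': 'WFD',
        'coadd': True,
        'bands': ['r', 'i'],
        'SNR': [40., 30.],
        'mag_range': [21., 25.5],
        'dt_range': [0.5, 25.],
    },
}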
Example #18
    def setUp(self):
        self.database = os.path.join(os.getenv('SIMS_MAF_DIR'), 'tests',
                                     'opsimblitz1_1133_sqlite.db')
        self.oo = db.OpsimDatabase(database=self.database)
import numpy as np
from lsst.sims.selfcal.generation import genCatalog, offsets, visitOffsets
import lsst.sims.maf.db as db

lsstFilter = 'r'

# Read in an Opsim database
opsimDB = db.OpsimDatabase('sqlite:///opsimblitz2_1060_sqlite.db')
#ralim=np.array([0,360])*np.pi/180.
#declim=np.array([0,-90])*np.pi/180.
ralim = np.radians(np.array([40, 60]))
declim = np.radians(np.array([-40, -60]))
nightMax = 730

cols = [
    'ditheredRA', 'ditheredDec', 'rotSkyPos', 'night', 'expMJD',
    'fiveSigmaDepth', 'obsHistID', 'transparency'
]

visits = opsimDB.fetchMetricData(
    cols,
    'ditheredRA < %f and ditheredRA > %f and ditheredDec > %f and ditheredDec < %f and  filter="%s" and night < %i'
    % (ralim[1], ralim[0], declim[1], declim[0], lsstFilter, nightMax))

# Make dtype names more generic and add any other stuff we want:
visits = visitOffsets(visits, zpOff=1.)

offsetList = []
# Systematic error floor for photometry
offsetList.append(offsets.OffsetSys())
# SNR
Example #20
import sys
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u
import lsst.sims.maf.db as db
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as metricBundles
from astropy.table import Table, Column
from scipy import optimize
import math
from astropy.io import ascii

#asciiLC is the template light curve
asciiLC = 'supernova1b_template.dat'
filterNames = ['u', 'g', 'r', 'i', 'z']
colors = {'u': 'purple', 'g': 'g', 'r': 'r', 'i': 'blue', 'z': 'm'}
#create list of peak days and magnitudes to iterate over
day_of_peak = np.arange(59580, 63232, 30)
mag_of_peak = np.arange(17, 25, 1)

# Set the database and query
runName = 'minion_1018'
opsdb = db.OpsimDatabase(runName + '_sqlite.db')

# Set the output directory
outDir = 'Observations Dictionary'
resultsDb = db.ResultsDb(outDir)

# This creates our database of observations. The pass metric just passes data straight through.
metric = metrics.PassMetric(cols=['expMJD', 'filter', 'fiveSigmaDepth'])
"""use slicer to restrict the ra and decs, use np.random.uniform to get random points, 
	first coordinate represents ra and second dec. Or, give a list of specific
	ra and decs - the second slicer is for the deep drilling fields. One must be commented out."""
#slicer = slicers.UserPointsSlicer(np.random.uniform(0,360,1000), np.random.uniform(-80,0,1000))
slicer = slicers.UserPointsSlicer([349.4, 0.00, 53.0, 34.4, 150.4],
                                  [-63.3, -45.5, -27.4, -5.1, 2.8])
#sql is empty as there are no restrictions currently
sql = ''
out_file = sys.argv[2]
out_dir = sys.argv[2]

gal_l_min = 0.
gal_l_max = 360.
gal_b_min = -89.
gal_b_max = 89.
diameter = 3.5
step = diameter / np.sqrt(2) # This would be enough on a 2D plane.
step *= 0.85

gal_l_all = np.linspace(gal_l_min, gal_l_max, int((gal_l_max - gal_l_min) / step + 1))
gal_b_all = np.linspace(gal_b_min, gal_b_max, int((gal_b_max - gal_b_min) / step + 1))
(gal_l, gal_b) = np.meshgrid(gal_l_all, gal_b_all)

c = SkyCoord(gal_l.flatten(), gal_b.flatten(), unit=u.deg, frame='galactic')
userRA = c.fk5.ra.value
userDec = c.fk5.dec.value

columns = ['observationStartMJD', 'filter', 'fiveSigmaDepth']

metric = metrics.PassMetric(cols=columns) 
slicer = slicers.UserPointsSlicer(userRA, userDec)
sqlconstraint = ''
MJDmetric = metricBundles.MetricBundle(metric, slicer, sqlconstraint,
                                       fileRoot=out_file)
bundleDict = {'MJDmetric': MJDmetric}
opsdb = db.OpsimDatabase(database)
group = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=out_dir)
group.runAll()
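
# A short sketch (assuming the MetricBundle API used above) of pulling the
# raw per-point observation arrays back out of the PassMetric results once
# group.runAll() has finished; index 0 is the first user-defined sky point:
mjds = MJDmetric.metricValues[0]['observationStartMJD']
depths = MJDmetric.metricValues[0]['fiveSigmaDepth']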
Example #22
import os
import copy
import time

import lsst.sims.maf.db as db
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.plots as plots
import lsst.sims.maf.metricBundles as metricBundles
# calibrationMetrics and sqlFromFilterString are assumed to be defined
# elsewhere in the original module.


def go(nside=64, rmag=21., SedTemplate='flat', DoRun=False, LFilters = [], \
           LNightMax=[], nightMax=1e4, \
           CustomPlotLimits=True, \
           RunOne=False, MaxRuns=1e3, \
           SpatialClip=95., \
           seeingCol='FWHMeff', \
           sCmap='cubehelix_r', \
           checkCorrKind=False, \
           wfdPlane=True, \
           useGRIZ=False):

    # Go to the directory where the sqlite databases are held...

    # cd /Users/clarkson/Data/LSST/OpSimRuns/opsim20160411


    # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits
    # Break the specifications across lines to make subdivision easier
    
    # Subsets by time first, then by filter, finally the whole shebang

    # 2016-04-23 - replaced enigma_1189 --> minion_1016
    # 2016-04-23 - replaced ops2_1092 --> minion_1020
    
    
    # (Yes the inversion of the first two is deliberate.)
    runNames = ['minion_1016', 'minion_1020', 'minion_1020', 'minion_1016', \
                    'minion_1020', 'minion_1016', 'minion_1020', 'minion_1016', \
                    'minion_1020', 'minion_1016']
    LFilters = ['', '', '', '', \
                    'u', 'u', 'y', 'y', \
                    '', '']  
    LNightMax = [365, 365, 730, 730, \
                     1e4, 1e4, 1e4, 1e4, \
                     1e4, 1e4]

    # WIC try again, this time on the new astro_lsst_01_1004 only
    if wfdPlane:
        LFilters = ['', '', '', 'u', 'y']  
        LNightMax = [365, 730, 1e4, 1e4, 1e4]
        runNames = ['astro_lsst_01_1004' for i in range (len(LFilters)) ]

    # WIC 2016-05-01 check correlation
    if checkCorrKind:
        LFilters = ['', '']
        LNightMax = [365, 365]
        runNames = ['minion_1016', 'minion_1016']

        # Type of correlation used for HA Degen 
        # checkCorrKind = True
        useSpearmanR = [False, True]
    
    if useGRIZ:
        runNames=['minion_1016','astro_lsst_01_1004', 'minion_1020']
        LFilters = ['griz' for iRun in range(len(runNames)) ]
        #LNightMax = [1e4 for iRun in range(len(runNames)) ]
        #LNightMax = [730 for iRun in range(len(runNames)) ]
        LNightMax = [365 for iRun in range(len(runNames)) ]


    # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good
    LUpperParallax = []
    LUpperPropmotion = []


    if CustomPlotLimits:
    
        LUpperParallax = [10, 10, 10, 10, \
                              10, 10, 40, 40, \
                              3.0, 3.0 ]

    
        # For proper motion, it's a little tricky to say because the
        # regular case is so pathological for the field. Try the following:
        LUpperPropmotion = [40, 40, 5, 20, \
                                3.5, 20, 3.5, 20, \
                                0.5, 5]

        if len(runNames) < 2:
            LUpperPropmotion = [100 for i in range(len(runNames))]

    print "runAstrom.go INFO - will run the following:"
    for iSho in range(len(runNames)):
        sFilThis = ''
        # print iSho, len(LFilters)
        if iSho <= len(LFilters):
            sFilThis = sqlFromFilterString(LFilters[iSho])

        print "%i: %-12s, %1s, %i, sqlFilter -- %s" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho], sFilThis)
    print "==========================="

    print "mag max = %.2f" % (rmag)
    print "---------------------------"

#    print runNames
#    if not DoRun:
#        print "Set DoRun=True to actually run this."
#        print len(LFilters), len(runNames), len(LFilters) == len(runNames)
#        return

#'kraken_1038', 'kraken_1034', 'ops2_1098']


    # nside = 64

    slicer = slicers.HealpixSlicer(nside=nside)

    # Make it so we don't bother with the silly power spectra
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # WIC - back up the plotting arguments with a default value
    plotFuncsPristine = copy.deepcopy(plotFuncs)

    # WIC - the only way this will make sense to me is if I make a
    # dictionary of plot arguments. Let's try it...
    DPlotArgs = {}
    for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']:
        DPlotArgs[plotArg] = copy.deepcopy(plotFuncs)

    if CustomPlotLimits:

        # Use the same color map for all the metrics
        for plotMetric in DPlotArgs.keys():
            DPlotArgs[plotMetric][0].defaultPlotDict['cmap'] = sCmap


        # Apply spatial clipping for all but the HADegen, for which we
        # have other limits...
        for plotMetric in ['parallax', 'propmotion', 'coverage']:
            DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip

        # Some limits common to spatial maps and histograms
        for iPl in range(0,2):
            DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True

        # NOT a loop because we might want to separate out the behavior

        # Standardized range for the histograms for new parallax metrics
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] =  1.
            
        # Standardize the sky map for the HAdegen as well.
        DPlotArgs['HAdegen'][0].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][0].defaultPlotDict['xMax'] =  1.


        # Standardize at least the lower bound of the histogram in
        # both the proper motion and parallax errors. Upper limit we
        # can customize with a loop.
        DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2  # should not be zero if log scale!!
        DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0.


    # WIC - try changing the plot dictionary

    if not DoRun:
        plotFuncs[0].defaultPlotDict['logScale'] = True
        print(DPlotArgs['propmotion'][0].defaultPlotDict)
        print(DPlotArgs['propmotion'][1].defaultPlotDict)
        
        return

    # The old runs have the seeing in finSeeing
    #seeingCol = 'finSeeing'

    ### UPDATE THE SEEING COLUMN
    #seeingCol = 'FWHMeff'   ## Moved up to a command-line argument


    # Use all the observations. Can change if you want a different
    # time span
    # sqlconstraint = ''

    # list of sqlconstraints now used, which gets handled within the loop.

    # run some summary stats on everything
    summaryMetrics = [metrics.MedianMetric()]

    tStart = time.time()

    # Running one, or the whole lot?
    RunMax = len(runNames)

    # allow user to set a different number (say, 2)
    if MaxRuns < RunMax and MaxRuns > 0:
        RunMax = int(MaxRuns)

    # the following keyword overrides
    if RunOne:
        RunMax = 1

    print "Starting runs. RunMax = %i" % (RunMax)

    for iRun in range(RunMax):
        run = runNames[iRun][:]

    # for run in runNames:
        # Open the OpSim database
        timeStartIteration = time.time()

        # Some syntax added to test for existence of the database
        dbFil = run+'_sqlite.db'
        if not os.access(dbFil, os.R_OK):
            print("runAstrom.go FATAL - cannot access db file %s" % (dbFil))
            print("runAstrom.go FATAL - skipping run %s" % (run))
            continue
    
        else:
            deltaT = time.time()-tStart
            print "runAstrom.go INFO - ##################################"
            print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \
                % (run, nside, deltaT/60.)

        opsdb = db.OpsimDatabase(run+'_sqlite.db')

        # Set SQL constraint appropriate for each filter in the
        # list. If we supplied a list of filters, use it for 
        sqlconstraint = ''
        ThisFilter = 'ugrizy'
        if len(LFilters) == len(runNames):

            # Only change the filter if one was actually supplied!
            if len(LFilters[iRun]) > 0:
                ThisFilter = LFilters[iRun]

                sqlconstraint = sqlFromFilterString(ThisFilter)

###                sqlconstraint = 'filter = "%s"' % (ThisFilter)


        # If nightmax was supplied, use it 
        ThisNightMax = int(nightMax)  # copy not view
        if len(LNightMax) == len(runNames):

            # Only update nightmax if one was given
            try:
                ThisNightMax = int(LNightMax[iRun])  # This might be redundant with the fmt statement below.
                if len(sqlconstraint) < 1:
                    sqlconstraint = 'night < %i' % (ThisNightMax)
                else:
                    sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax)
            except (TypeError, ValueError):
                print("runAstrom.go WARN - run %i problem with NightMax" % (iRun))

        # Set where the output should go - include the filter!! 
        sMag = '%.1f' % (rmag)
        sMag = sMag.replace(".","p")
        outDir = './metricEvals/%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag)

        # Ensure we'll be able to find this later on...
        if CustomPlotLimits:
            outDir = '%s_lims' % (outDir)

        # if we are testing the kind of correlation used, include that
        # in the output here.
        if checkCorrKind:
            if useSpearmanR[iRun]:
                sCorr = 'spearmanR'
            else:
                sCorr = 'pearsonR'
        
            outDir = '%s_%s' % (outDir, sCorr)

        # From this point onwards, stuff actually gets run. This is
        # the place to output what will actually happen next.
        print "runAstrom.go INFO - about to run:"
        print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside)
        print "runAstrom.go INFO - output directory will be %s" % (outDir)
        if not DoRun:
            continue

        # ensure the output directory actually exists...
        if not os.access(outDir, os.R_OK):
            print "runAstrom.go INFO - creating output directory %s" % (outDir)
            os.makedirs(outDir)


        resultsDb = db.ResultsDb(outDir=outDir)
        bundleList = []

        # WIC - to make this at least somewhat uniform, build the plot
        # functions including arguments out of our copies above.
        plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion'])
        plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax'])
        plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage'])
        plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen'])

        # if using custom plot limits, will want to include the limits
        # for proper motion and parallax too... programming a bit defensively 
        # here, including an extra check (rather than just the length of the lists 
        # above). 
        if CustomPlotLimits:
            if len(LUpperParallax) == len(runNames):
                plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun])

            if len(LUpperPropmotion) == len(runNames):
                plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun])

        # Configure the metrics
        metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs = plotFuncsParallax, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsPropmotion, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = calibrationMetrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsCoverage, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Now for the HA Degen metric. If testing the type of
        # correlation, call the metric differently here. Since the
        # argument to actually do this is only part of my github fork
        # at the moment, we use a different call. Running with default
        # arguments (checkCorrKind=False) should then work without
        # difficulty.
        metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        if checkCorrKind:
            metric = calibrationMetrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate, useSpearmanR=useSpearmanR[iRun])
            print "TESTING CORRELATION KIND -- useSpearmanR", useSpearmanR[iRun]
            

        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsHAdegen, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Run everything and make plots
        bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
        bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
#        try:
        bgroup.runAll()

        print "runAstrom.go INFO - bundles took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)

#        except KeyboardInterrupt:
#            print "runAstrom.go FATAL - keyboard interrupt detected. Halting."
#            return
        bgroup.plotAll()

        print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)
        

    print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
Example #23
def go(nside=64, rmag=20., SedTemplate='flat', DoRun=False, LFilters = [], \
           LNightMax=[], nightMax=1e4, \
           CustomPlotLimits=True, \
           RunOne=False, MaxRuns=1e3, \
           SpatialClip=95.):

    # runNames = ['enigma_1189', 'ops2_1093']

    # runNames
    #runNames = ['ops2_1092', 'kraken_1038', 'kraken_1034', 'ops2_1098']
    #runNames = ['kraken_1038', 'kraken_1034', 'ops2_1098']

    # 2015-12-23 - put kraken_1038 at the end, it seems to run
    # extremely slowly...
    runNames = ['enigma_1189', 'ops2_1098', 'kraken_1034', 'kraken_1038']

    runNames = ['ops2_1092', 'kraken_1033', 'enigma_1271']

    # UPDATE - ops2_1092 ran quite quickly on nside=32... rerun on 64

    runNames = ['ops2_1092', 'enigma_1189', 'enigma_1271', 'kraken_1038']
    
    # UPDATE 2015-12-28 -- run with single-filter choices, compare
    # enigma to ops2_1092

    # WIC 2015-12-28 -- try with single-filter and all then small subset
    runNames = ['ops2_1092', 'ops2_1092', 'ops2_1092', 'enigma_1189', 'enigma_1189', 'enigma_1189']
    LFilters = ["u", "y", '',   "u", "y", '']
    LNightMax = [1e4, 1e4, 730, 1e4, 1e4, 730]

    # WIC 2015-12-28 - 23:00 - try using a different SED template,
    # just go with single filters for now
    #
    # DO WE NEED THIS??
    

    # WIC 2015-12-28 - 22:00; much to my surprise, that took less than
    # half an hour to go all the way through. Try again, this time using slightly more filters.    
    runNames = ['ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189']
    LFilters = ['', '', 'griz', 'griz']  # (griz was not recognized)

    # WIC 2015-12-29 - set up for a master-run with all cases, this time with plotting limits
    # Break the specifications across lines to make subdivision easier
    
    # Subsets by time first, then by filter, finally the whole shebang
    
    # (Yes the inversion of the first two is deliberate.)
    runNames = ['enigma_1189', 'ops2_1092', 'ops2_1092', 'enigma_1189', \
                    'ops2_1092', 'enigma_1189', 'ops2_1092', 'enigma_1189', \
                    'ops2_1092', 'enigma_1189']
    LFilters = ['', '', '', '', \
                    'u', 'u', 'y', 'y', \
                    '', '']  
    LNightMax = [365, 365, 730, 730, \
                     1e4, 1e4, 1e4, 1e4, \
                     1e4, 1e4]

    # List of upper limits to parallax and proper motion error. For parallax, 3.0 mas is probably good
    LUpperParallax = []
    LUpperPropmotion = []

    if CustomPlotLimits:
    
        LUpperParallax = [10, 10, 10, 10, \
                              10, 10, 40, 40, \
                              3.0, 3.0 ]
    
        # For proper motion, it's a little tricky to say because the
        # regular case is so pathological for the field. Try the following:
        LUpperPropmotion = [40, 40, 5, 20, \
                                3.5, 20, 3.5, 20, \
                                0.5, 5]


    print "runAstrom.go INFO - will run the following:"
    for iSho in range(len(runNames)):
        print "%i: %-12s, %1s, %i" % (iSho, runNames[iSho], LFilters[iSho], LNightMax[iSho])
    print "==========================="

#    print runNames
#    if not DoRun:
#        print "Set DoRun=True to actually run this."
#        print len(LFilters), len(runNames), len(LFilters) == len(runNames)
#        return

#'kraken_1038', 'kraken_1034', 'ops2_1098']


    # nside = 64

    slicer = slicers.HealpixSlicer(nside=nside)

    # Make it so we don't bother with the silly power spectra
    plotFuncs = [plots.HealpixSkyMap(), plots.HealpixHistogram()]

    # WIC - back up the plotting arguments with a default value
    plotFuncsPristine = copy.deepcopy(plotFuncs)

    # WIC - the only way this will make sense to me is if I make a
    # dictionary of plot arguments. Let's try it...
    DPlotArgs = {}
    for plotArg in ['parallax', 'propmotion', 'coverage', 'HAdegen']:
        DPlotArgs[plotArg] = copy.deepcopy(plotFuncs)

    if CustomPlotLimits:

        # All spatial maps use percentile clipping
        for plotMetric in DPlotArgs.keys():
            DPlotArgs[plotMetric][0].defaultPlotDict['percentileClip'] = SpatialClip

        # Some limits common to spatial maps and histograms
        for iPl in range(0,2):
            DPlotArgs['propmotion'][iPl].defaultPlotDict['logScale'] = True
            
        # Standardized range for the histograms for new parallax metrics
        DPlotArgs['coverage'][1].defaultPlotDict['xMin'] = 0.
        DPlotArgs['coverage'][1].defaultPlotDict['xMax'] = 1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMin'] = -1.
        DPlotArgs['HAdegen'][1].defaultPlotDict['xMax'] =  1.
            
        # Standardize at least the lower bound of the histogram in
        # both the proper motion and parallax errors. Upper limit we
        # can customize with a loop.
        DPlotArgs['propmotion'][1].defaultPlotDict['xMin'] = 1e-2  # should not be zero if log scale!!
        DPlotArgs['parallax'][1].defaultPlotDict['xMin'] = 0.


    # WIC - try changing the plot dictionary

    if not DoRun:
        plotFuncs[0].defaultPlotDict['logScale'] = True
        print(DPlotArgs['propmotion'][0].defaultPlotDict)
        print(DPlotArgs['propmotion'][1].defaultPlotDict)
        
        return

    # The old runs have the seeing in finSeeing
    seeingCol = 'finSeeing'

    # Try it out for a 20th mag star with a flat SED (can change mag
    # or to OBAFGKM)
    # rmag = 20. ## NOW AN ARGUMENT
    #SedTemplate='flat'

    # Use all the observations. Can change if you want a different
    # time span
    sqlconstraint = ''

    # list of sqlconstraints now used, which gets handled within the loop.

    # run some summary stats on everything
    summaryMetrics = [metrics.MedianMetric()]

    tStart = time.time()

    # Running one, or the whole lot?
    RunMax = len(runNames)

    # allow user to set a different number (say, 2)
    if MaxRuns < RunMax and MaxRuns > 0:
        RunMax = int(MaxRuns)

    # the following keyword overrides
    if RunOne:
        RunMax = 1

    print "Starting runs. RunMax = %i" % (RunMax)

    for iRun in range(RunMax):
        run = runNames[iRun][:]

    # for run in runNames:
        # Open the OpSim database
        timeStartIteration = time.time()

        # Some syntax added to test for existence of the database
        dbFil = run+'_sqlite.db'
        if not os.access(dbFil, os.R_OK):
            print "runAstrom.go FATAL - cannot acces db file %s" % (dbFil)
            print "runAstrom.go FATAL - skipping run %s" % (run)
            continue
    
        else:
            deltaT = time.time()-tStart
            print "runAstrom.go INFO - ##################################"
            print "runAstrom.go INFO - starting run %s with nside=%i after %.2f minutes" \
                % (run, nside, deltaT/60.)

        opsdb = db.OpsimDatabase(run+'_sqlite.db')

        # Set SQL constraint appropriate for each filter in the
        # list. If we supplied a list of filters, use it for 
        sqlconstraint = ''
        ThisFilter = 'ugrizy'
        if len(LFilters) == len(runNames):

            # Only change the filter if one was actually supplied!
            if len(LFilters[iRun]) == 1:
                ThisFilter = LFilters[iRun]
                sqlconstraint = 'filter = "%s"' % (ThisFilter)

        # If nightmax was supplied, use it 
        ThisNightMax = int(nightMax)  # copy not view
        if len(LNightMax) == len(runNames):

            # Only update nightmax if one was given
            try:
                ThisNightMax = int(LNightMax[iRun])  # This might be redundant with the fmt statement below.
                if len(sqlconstraint) < 1:
                    sqlconstraint = 'night < %i' % (ThisNightMax)
                else:
                    sqlconstraint = '%s and night < %i' % (sqlconstraint, ThisNightMax)
            except (TypeError, ValueError):
                print("runAstrom.go WARN - run %i problem with NightMax" % (iRun))

        # Set where the output should go - include the filter!! 
        sMag = '%.1f' % (rmag)
        sMag = sMag.replace(".","p")
        outDir = '%s_nside%i_%s_n%i_r%s' % (run, nside, ThisFilter, ThisNightMax, sMag)

        # Ensure we'll be able to find this later on...
        if CustomPlotLimits:
            outDir = '%s_lims' % (outDir)

        # From this point onwards, stuff actually gets run. This is
        # the place to output what will actually happen next.
        print "runAstrom.go INFO - about to run:"
        print "runAstrom.go INFO - sqlconstraint: %s ; run name %s ; nside %i" % (sqlconstraint, run, nside)
        print "runAstrom.go INFO - output directory will be %s" % (outDir)
        if not DoRun:
            continue

        resultsDb = db.ResultsDb(outDir=outDir)
        bundleList = []

        # WIC - to make this at least somewhat uniform, build the plot
        # functions including arguments out of our copies above.
        plotFuncsPropmotion = copy.deepcopy(DPlotArgs['propmotion'])
        plotFuncsParallax = copy.deepcopy(DPlotArgs['parallax'])
        plotFuncsCoverage = copy.deepcopy(DPlotArgs['coverage'])
        plotFuncsHAdegen = copy.deepcopy(DPlotArgs['HAdegen'])

        # if using custom plot limits, will want to include the limits
        # for proper motion and parallax too... programming a bit defensively 
        # here, including an extra check (rather than just the length of the lists 
        # above). 
        if CustomPlotLimits:
            if len(LUpperParallax) == len(runNames):
                plotFuncsParallax[1].defaultPlotDict['xMax'] = float(LUpperParallax[iRun])

            if len(LUpperPropmotion) == len(runNames):
                plotFuncsPropmotion[1].defaultPlotDict['xMax'] = float(LUpperPropmotion[iRun])

        # Configure the metrics
        metric = metrics.ParallaxMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs = plotFuncsParallax, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric=metrics.ProperMotionMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsPropmotion, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ParallaxCoverageMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsCoverage, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        metric = metrics.ParallaxHADegenMetric(rmag=rmag, seeingCol=seeingCol, SedTemplate=SedTemplate)
        bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint, runName=run,
#                                            plotFuncs=plotFuncs, \
                                                plotFuncs=plotFuncsHAdegen, \
                                                summaryMetrics=summaryMetrics)
        bundleList.append(bundle)

        # Run everything and make plots
        bundleDict = metricBundles.makeBundlesDictFromList(bundleList)
        bgroup = metricBundles.MetricBundleGroup(bundleDict, opsdb, outDir=outDir, resultsDb=resultsDb)
#        try:
        bgroup.runAll()

        print "runAstrom.go INFO - bundles took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)

#        except KeyboardInterrupt:
#            print "runAstrom.go FATAL - keyboard interrupt detected. Halting."
#            return
        bgroup.plotAll()

        print "runAstrom.go INFO - bundles + plotting took %.2f minutes" \
            % ((time.time() - timeStartIteration) / 60.)
        

    print "Finished entire set. %i runs took %.2f minutes." % (iRun + 1, (time.time()-tStart)/60.)
import matplotlib.pyplot as plt
import lsst.sims.maf.metricBundles as metricBundles
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.stackers as stackers
import lsst.sims.maf.db as db

database = db.OpsimDatabase('sqlite:///enigma_1189_sqlite.db')

metric = metrics.MeanMetric(col='HA')
slicer = slicers.HealpixSlicer(nside=4)
stackerList = [stackers.NormAirmassStacker()]

mb = metricBundles.MetricBundle(metric,
                                slicer,
                                stackerList=stackerList,
                                sqlconstraint='filter="r" and night < 100')
metric = metrics.RmsMetric(col='airmass')
mb2 = metricBundles.MetricBundle(metric,
                                 slicer,
                                 stackerList=stackerList,
                                 sqlconstraint='filter="r" and night < 100')

print(mb.dbCols)

mbD = {0: mb, 1: mb2}

mbg = metricBundles.MetricBundleGroup(mbD, database, outDir='test')
mbg.runAll()
mbg.plotAll(closefigs=False)
plt.show()
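
# A sketch (assuming the stacker API shown above) of applying a stacker
# directly to fetched opsim data, outside of any MetricBundle:
# simdata = database.fetchMetricData(['airmass', 'fieldDec'], 'night < 100')
# simdata = stackers.NormAirmassStacker().run(simdata)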
Example #25
from __future__ import print_function
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.db as db

# Connect to opsim
dbAddress = 'sqlite:///ops1_1140_sqlite.db'
oo = db.OpsimDatabase(dbAddress)
colnames = ['expMJD', 'fieldRA', 'fieldDec']
sqlconstraint = 'filter="r"'
# Get opsim simulation data
simdata = oo.fetchMetricData(colnames, sqlconstraint)
# Init the slicer, set 2 points
slicer = slicers.UserPointsSlicer(ra=[0., .1], dec=[0., -.1])
# Setup slicer (builds kdTree)
slicer.setupSlicer(simdata)
# Slice Point for index zero
ind = slicer._sliceSimData(0)
expMJDs = simdata[ind['idxs']]['expMJD']
print('mjd for the 1st user defined point', expMJDs)
# Find the expMJDs for the 2nd point
ind = slicer._sliceSimData(1)
expMJDs = simdata[ind['idxs']]['expMJD']
print('mjd for the 2nd user defined point', expMJDs)
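
# The same lookup generalized to every user point (a sketch; len(slicer) is
# assumed to return the number of slice points):
for i in range(len(slicer)):
    ind = slicer._sliceSimData(i)
    print('point %i: %i visits' % (i, len(ind['idxs'])))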
Example #26
    # Check if user passed directory + filename as opsimDb.
    if len(os.path.dirname(args.opsimDb)) > 0:
        raise Exception(
            'OpsimDB should be just the filename of the sqlite file (not %s). Use --dbDir.'
            % (args.opsimDb))

    opsimName = args.opsimDb.replace('_sqlite.db', '')
    metadata = args.sqlConstraint.replace('=', '').replace(
        'filter', '').replace("'", '').replace('"', '').replace('/', '.')

    if not args.skipComp:
        verbose = False
        # Get db connection info, and connect to database.
        dbfile = os.path.join(args.dbDir, args.opsimDb)
        oo = db.OpsimDatabase(dbfile)
        sqlconstraint = args.sqlConstraint
        # Fetch the data from opsim.
        simdata, fields = getData(oo, sqlconstraint)
        # Set up the time bins for the movie slicer.
        start_date = simdata['expMJD'][0]
        if args.movieStepSize == 0:
            bins = simdata['expMJD']
        else:
            end_date = simdata['expMJD'].max()
            bins = np.arange(start_date, end_date + args.movieStepSize / 2.0,
                             args.movieStepSize, float)
        if args.addPreviousObs:
            # Go back and grab all the data, including all previous observations.
            if "night =" in sqlconstraint:
                sqlconstraint = sqlconstraint.replace("night =", "night <=")
    def getMetrics(self): 
        
        colmn = 'observationStartMJD'
        opsdb = db.OpsimDatabase(self.opsim)
        
        # Directory where tmp files are going to be stored TODO eliminate - this
        outDir = 'TmpDir'
        resultsDb = db.ResultsDb(outDir=outDir)

        
        metric=metrics.PassMetric(cols=[colmn,'fiveSigmaDepth', 'filter'])
        slicer = slicers.UserPointsSlicer(ra=self.ra,dec=self.dec)
        sqlconstraint = 'filter = \'' + self.fil + '\''

        bundle = mb.MetricBundle(metric, slicer, sqlconstraint, runName=self.name)
        bgroup = mb.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir, resultsDb=resultsDb)
        bgroup.runAll()
        
        filters = np.unique(bundle.metricValues[0]['filter'])
        mv = bundle.metricValues[0]


        # Get dates
        self.mjd = mv[colmn]
        self.mjd = np.sort(self.mjd)


        # Define redshift bins
        zbin = np.linspace(0.5,7.5,8)
        zbin = np.insert(zbin,0,0)

        # Converting MJD to survey days
        T = int(self.mjd.max() - self.mjd.min() + 1)
        swop = []
        wedgeop = []
        scop = []
        edgecop = []
        i = 0

        total = len(zbin) * self.nlc
        progress = 0

        # We generate a number (nlc) of light curves for each redshift bin
        for z in zbin:
            for w in range(self.nlc):
                # Generating continuous light curve (cadence=1d)
                tt, yy = drw_artificial_lc(T, z=z, frame=self.frame)
         
                sn, edgesn = self.sf(tt,yy,z=z)
                # Calculating SF for the current continuous light curve
                scop.append(sn)
                edgecop.append(edgesn)
                self.edgesn = edgesn
                # Generating OpSim light curve evaluated on the current continuous light curve
                top,yop=self.__opsim_lc(tt,yy)
                # Calculating SF for the current OpSim light curve
                srol,edgesrol=self.sf(top,yop,z=z)
                swop.append(srol)
                wedgeop.append(edgesrol)

                #progressBar(progress, total)
                progress += 1
            i += 1  # counter


        swop=np.asarray(swop)
        swop=swop.reshape(9,self.nlc,99)
        scop=np.asarray(scop)
        scop=scop.reshape(9,self.nlc,99)
        razrol=[]
        for z in range(9):
            for r in range(self.nlc):
                # Calculating the SF metric
                razrol.append((np.nan_to_num(np.sqrt(scop[z,r,:]))-np.nan_to_num(np.sqrt(swop[z,r,:]))))

        razrol9=np.asarray(razrol)
        razrol9=razrol9.reshape(9,self.nlc,99)
        # We take the mean of generated light curves for each redshift bin.
        self.raz2=np.nanmean(razrol9[:,:,:],axis=1)
Example #28
    def run(self, dataSlice, slicePoint=None):

        m5Values = np.zeros(dataSlice.size,dtype=float)
        raftNames = [self.convertDict[point[0:5]]  for point in slicePoint['chipNames']]
        v1rafts = np.in1d(raftNames, self.rafts1)
        m5Values[v1rafts] = dataSlice[self.m5v1Col][v1rafts]

        v2rafts = np.in1d(raftNames, self.rafts2)
        m5Values[v2rafts] = dataSlice[self.m5v2Col][v2rafts]

        good = np.where( m5Values != 0.)
        return 1.25 * np.log10(np.sum(10.**(.8*m5Values[good])))
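        # Sanity check (a sketch) of the coadded-depth formula above: N
        # identical visits of depth m5 coadd to m5 + 1.25*log10(N), e.g. two
        # m5=24.0 visits give 24.0 + 1.25*np.log10(2) ~ 24.38 mag.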



opsdb = db.OpsimDatabase('enigma_1189_sqlite.db')
outDir = 'Flipped'
resultsDb = db.ResultsDb(outDir=outDir)

# Grab just the WFD area
propids, propTags = opsdb.fetchPropInfo()
WFDpropid = propTags['WFD']
wfdWhere = utils.createSQLWhere('WFD', propTags)

summaryStats = [metrics.MedianMetric(), metrics.RmsMetric(), metrics.RobustRmsMetric()]

filters = ['u','g']
nside = 64
bundleList = []

years = [1,3,10]
Example #29
        " Default=fieldRA.")
    parser.add_argument(
        "--latCol",
        type=str,
        default='fieldDec',
        help="Column to use for Dec values (can be a stacker dither column)." +
        " Default=fieldDec.")
    parser.add_argument('--night', type=int, default=1)

    parser.set_defaults()
    args, extras = parser.parse_known_args()

    bundleDict = makeBundleList(args.dbFile,
                                nside=args.nside,
                                lonCol=args.lonCol,
                                latCol=args.latCol,
                                night=args.night)

    # Set up / connect to resultsDb.
    resultsDb = db.ResultsDb(outDir=args.outDir)
    # Connect to opsimdb.
    opsdb = db.OpsimDatabase(args.dbFile)

    # Set up metricBundleGroup.
    group = metricBundles.MetricBundleGroup(bundleDict,
                                            opsdb,
                                            outDir=args.outDir,
                                            resultsDb=resultsDb)
    group.runAll()
    group.plotAll()
import os
import yaml
import numpy as np
from importlib import import_module
import lsst.sims.maf.db as db
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metricBundles as metricBundles
# GetReference and GenerateFakeObservations are assumed to come from the
# surrounding sn_* packages (exact import paths are not shown in this
# excerpt).


def run(config_filename):
    # YAML input file.
    config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
    # print(config)
    outDir = 'Test'  # this is for MAF

    # grab the db filename from yaml input file
    dbFile = config['Observations']['filename']
    """
    conn = sqlite3.connect(dbFile)
    cur = conn.cursor()
    table_name='Proposal'
    result = cur.execute("PRAGMA table_info('%s')" % table_name).fetchall()
    print('Results',result)
    cur.execute("SELECT * FROM Proposal")
    rows = cur.fetchall()
    for row in rows:
        print(row)
    print('end')
    cur.execute('PRAGMA TABLE_INFO({})'.format('ObsHistory'))
    
    names = [tup[1] for tup in cur.fetchall()]
    print(names)
    """

    # check whether the X0_norm file exists or not (and generate it if necessary)
    absMag = config['SN parameters']['absmag']
    salt2Dir = config['SN parameters']['salt2Dir']
    model = config['Simulator']['model']
    version = str(config['Simulator']['version'])

    x0normFile = 'reference_files/X0_norm_{}.npy'.format(absMag)

    if not os.path.isfile(x0normFile):
        from sn_tools.sn_utils import X0_norm
        X0_norm(salt2Dir=salt2Dir,
                model=model,
                version=version,
                absmag=absMag,
                outfile=x0normFile)

    x0_tab = np.load(x0normFile)

    reference_lc = None
    if 'sn_fast' in config['Simulator']['name']:
        print('Loading reference LCs from',
              config['Simulator']['Reference File'])
        reference_lc = GetReference(config['Simulator']['Reference File'],
                                    config['Instrument'])
        print('Reference LCs loaded')

    module = import_module(config['Metric'])

    if dbFile != 'None':
        if dbFile.endswith('.db'):
            opsimdb = db.OpsimDatabase(dbFile)
            version = opsimdb.opsimVersion
            propinfo, proptags = opsimdb.fetchPropInfo()
            print('proptags and propinfo', proptags, propinfo)

            # grab the fieldtype (DD or WFD) from yaml input file
            fieldtype = config['Observations']['fieldtype']
            slicer = slicers.HealpixSlicer(
                nside=config['Pixelisation']['nside'])

            # print('slicer',slicer.pixArea,slicer.slicePoints['ra'])
            #print('alors condif', config)
            metric = module.SNMetric(config=config,
                                     coadd=config['Observations']['coadd'],
                                     x0_norm=x0_tab,
                                     reference_lc=reference_lc)

            sqlconstraint = opsimdb.createSQLWhere(fieldtype, proptags)

            mb = metricBundles.MetricBundle(metric, slicer, sqlconstraint)

            mbD = {0: mb}

            resultsDb = db.ResultsDb(outDir=outDir)

            mbg = metricBundles.MetricBundleGroup(mbD,
                                                  opsimdb,
                                                  outDir=outDir,
                                                  resultsDb=resultsDb)

            mbg.runAll()

        if dbFile.endswith('.npy'):
            metric = module.SNMetric(config=config,
                                     coadd=False,
                                     x0_norm=x0_tab,
                                     reference_lc=reference_lc)

            observations = np.load(dbFile)

            metric.run(observations)

        if metric.save_status:
            metric.simu.Finish()

    else:
        config_fake = yaml.load(open(config['Param_file']),
                                Loader=yaml.FullLoader)
        fake_obs = GenerateFakeObservations(config_fake).Observations

        metric = module.SNMetric(config=config,
                                 coadd=config['Observations']['coadd'],
                                 x0_norm=x0_tab)
        metric.run(fake_obs)
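
# Note (a sketch of the intended usage): setting Observations -> filename to
# the string 'None' in the YAML routes run() into the else-branch above,
# where observations are built by GenerateFakeObservations from the
# 'Param_file' configuration instead of an opsim database.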