Example #1
def inject_fakes_to_calexp(calexp_repo,
                           calexp_id,
                           magVar,
                           coords,
                           poisson=False):
    """This function injects fakes onto a calexp exposure at given positions.
       caelxp_repo:    An empty calexp repo which is used to save the injected exposure.
       calexp_id:      The data id of the calexp exposure
       magVar:         The injected magnitude
       coords:         A list of [x, y] pixel coordinates
       poisson:        If true, add Poisson fluctuations to the fake model
    """

    calexp_butler = Butler(calexp_repo)
    calexp_photoCalib = calexp_butler.get('calexp_photoCalib', calexp_id)
    calexp_exposure = calexp_butler.get('calexp', dataId=calexp_id)
    for coord in coords:
        x, y = coord[0], coord[1]
        inject_star(calexp_exposure,
                    calexp_photoCalib,
                    x,
                    y,
                    magVar,
                    poisson=poisson)
    calexp_butler.put(calexp_exposure, 'calexp', dataId=calexp_id)
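A minimal usage sketch for the function above; the repository path, data ID keys, and magnitude are placeholders (the exact data ID keys depend on the camera mapper), and inject_star is assumed to be defined alongside the function:

calexp_repo = '/path/to/output_repo'            # hypothetical output repo
calexp_id = {'visit': 123456, 'detector': 42}   # hypothetical data ID
coords = [[1000.5, 1200.0], [1500.0, 800.25]]   # [x, y] pixel positions
inject_fakes_to_calexp(calexp_repo, calexp_id, magVar=22.0,
                       coords=coords, poisson=True)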
Example #2
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    ----------
    tract: int
        Tract of sky region to load
    repo: str
        File location of Butler repository+rerun to load.
    patches: list of str
        List of patches.  If not specified, defaults to all patches in the
        tract, as determined from the skymap.

    Returns
    -------
    AstroPy Table of merged catalog
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    merged_patch_cats = []
    for patch in patches:
        this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # Even if this_patch_merged_cat is an empty Table, it's fine to append
        # it to the list here; empty tables simply vanish in the vstack below.
        merged_patch_cats.append(this_patch_merged_cat)

    merged_tract_cat = vstack(merged_patch_cats)
    return merged_tract_cat
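A hedged usage sketch; the repo path and tract are placeholders, and load_patch must be available in the same module:

repo = '/path/to/repo/rerun/coadd-forced'   # hypothetical rerun path
tract_cat = load_tract(repo, tract=4849, patches=['0,0', '0,1'])
print(len(tract_cat), tract_cat.colnames[:5])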
Example #3
def loadAllTables(filters, newpath, oldpath, dataPath, patch, tract):
    logger.info("Loading new catalog")
    newCats = loadCatalogs(filters, newpath, "template")
    logger.info("Loading new flux conserved catalog")
    newCats2 = loadCatalogs(filters, newpath)
    logger.info("Loading old catalog")
    oldCats = loadCatalogs(filters, oldpath)
    logger.info("Building astropy tables")
    newFlux, newSed = getAllFlux(newCats, filters)
    newFlux2, newSed2 = getAllFlux(newCats2, filters)
    newTable = buildAllTables(newCats, newFlux, newSed, filters)
    newTable2 = buildAllTables(newCats2, newFlux2, newSed2, filters)
    newTable = newTable[(newTable["parent"] != 0) & ~np.isnan(newTable["x"])]
    newTable2 = newTable2[(newTable2["parent"] != 0)
                          & ~np.isnan(newTable2["x"])]
    oldTables = OrderedDict([(f, buildTable(oldCats[f])) for f in filters])
    logger.info("matching results")
    oldTable, matches = matchAllCatalogs(newTable, oldTables, filters)
    matchedNew = newTable[matches]
    matchedNew2 = newTable2[matches]

    logger.info("loading calexps")
    butler = Butler(inputs=dataPath)
    calexp = OrderedDict()
    for f in filters:
        calexp[f] = butler.get('deepCoadd_calexp',
                               patch=patch,
                               filter="HSC-" + f,
                               tract=tract)
    return oldTable, matchedNew, matchedNew2, calexp, newCats
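A hypothetical call, assuming the helpers used above (loadCatalogs, getAllFlux, buildAllTables, buildTable, matchAllCatalogs) are defined in the same module; all paths and IDs are placeholders. Note the bare band letters, since the code prepends "HSC-" itself:

filters = ['G', 'R', 'I']   # looked up as HSC-G, HSC-R, HSC-I
oldTable, matchedNew, matchedNew2, calexp, newCats = loadAllTables(
    filters,
    newpath='/path/to/new/rerun',
    oldpath='/path/to/old/rerun',
    dataPath='/path/to/coadd/repo',
    patch='4,4',
    tract=9813)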
Example #4
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    ----------
    tract: int
        Tract of sky region to load
    repo: str
        File location of Butler repository+rerun to load.
    patches: list of str
        List of patches.  If not specified, defaults to all patches in the
        tract, as determined from the skymap.

    Returns
    -------
    Pandas DataFrame of merged catalog
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    merged_patch_cats = []
    for patch in patches:
        this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # DataFrame.append returns a new frame instead of modifying in place,
        # so collect the patch catalogs and concatenate them once at the end.
        merged_patch_cats.append(this_patch_merged_cat)

    merged_tract_cat = pd.concat(merged_patch_cats, ignore_index=True)
    return merged_tract_cat
Example #5
    def testCameraFromButler(self):
        """Test that the butler can return the camera
        """
        butler = Butler(self.input)
        camera = butler.get("camera", immediate=True)
        self.assertEqual(camera.getName(), self.mapper.camera.getName())
        self.assertEqual(len(camera), len(self.mapper.camera))
        self.assertEqual(len(camera[0]), len(self.mapper.camera[0]))
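Example #6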
def main(visit, detector, diarepo, outputdir='.'):
    diabutler = Butler(diarepo)

    diff_img_path = os.path.join(outputdir,
                                 f'diff_exposure_v{visit}_d{detector}.fits')
    science_img_path = os.path.join(
        outputdir, f'science_exposure_v{visit}_d{detector}.fits')

    imgD = diabutler.get('deepDiff_differenceExp',
                         visit=visit,
                         detector=detector)
    imgD.writeFits(diff_img_path)

    imgS = diabutler.get('calexp', visit=visit, detector=detector)
    imgS.writeFits(science_img_path)

    print(f'files copied to {outputdir}')
    return
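A hedged invocation sketch; the visit/detector values and repository path are placeholders:

main(visit=123456, detector=42,
     diarepo='/path/to/dia_repo', outputdir='./exports')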
Example #7
class FitsStorageTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        inputDir = os.path.join(ROOT, "data", "input")
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'), prefix=type(self).__name__+'-')
        self.butler = Butler(inputs=inputDir, outputs={"root": self.testDir, "mode": 'rw'})

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testReadPropertyList(self):
        """Test that reading a FitsStorage dataset into a PropertyList
        is the same as using the special _md Exposure hook.
        """
        md1 = self.butler.get("raw_md", visit=1, filter="g")
        md2 = self.butler.get("rawMetadataDirect", visit=1, filter="g")
        self.assertEqual(md1, md2)
Example #8
    def ingest(cls, root, camera, visit, filenames, sensors, metadata):
        """Add all images from an external visit (a full-focal-plane
        exposure) to a data repository.

        This both symlinks the external data files to the appropriate
        location in the directory structure and adds the necessary
        rows to the SQLite registry tables.

        Parameters
        ----------
        root : str
            Directory of the data repository to add data to.  Must have
            an existing "registry.sqlite3" file present directly in the
            root and a _mapper file pointing to HscAndExtMapper.
        camera : str
            Name of the camera used to produce the external observation.
            Must have an entry in ExternalImage.CAMERA_INFO.
        visit : int
            Original integer visit ID for the observation, *before* adding
            CAMERA_INFO[camera]["ID"]*CAMERA_ID_MULTIPLIER.
        filenames : list
            A list of file names containing the external data files, either
            relative to the current directory or absolute.
        sensors : list
            A list of integer sensor IDs corresponding to the filenames list.
        metadata : VisitMetadata
            An object containing additional metadata for this visit to be
            added to the registry.  See VisitMetadata for a description of
            what attributes are required.
        """
        db = sqlite3.connect(os.path.join(root, "registry.sqlite3"))
        butler = Butler(inputs=[root])
        visit += cls.CAMERA_INFO[camera]["id"] * cls.CAMERA_ID_MULTIPLIER
        ccdCols = [
            "filter", "dateObs", "taiObs", "field", "expId", "pointing",
            "dataType", "pa"
        ]
        ccdSql = "INSERT INTO raw (visit, ccd, {}) VALUES (?, ?, {})".format(
            ", ".join(ccdCols), ", ".join(["?"] * len(ccdCols)))
        ccdValues = tuple(getattr(metadata, col) for col in ccdCols)
        visitCols = ["filter", "dateObs", "taiObs", "field"]
        visitSql = "INSERT INTO raw_visit (visit, {}) VALUES (?, {})".format(
            ", ".join(visitCols), ", ".join(["?"] * len(visitCols)))
        visitValues = tuple(getattr(metadata, col) for col in visitCols)
        for filename, sensor in zip(filenames, sensors):
            outputFileName = butler.get("external_filename",
                                        visit=visit,
                                        ccd=sensor)[0]
            os.symlink(filename, outputFileName)
            db.execute(ccdSql, (
                visit,
                sensor,
            ) + ccdValues)
        db.execute(visitSql, (visit, ) + visitValues)
        db.commit()
        return visit
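The docstring refers to a VisitMetadata container without showing it; a minimal sketch consistent with the attributes this method reads via getattr (the column lists ccdCols and visitCols) could look like the following, where the concrete field types are assumptions:

from dataclasses import dataclass

@dataclass
class VisitMetadata:
    """Hypothetical container for the registry columns read by ingest()."""
    filter: str
    dateObs: str
    taiObs: str
    field: str
    expId: int
    pointing: int
    dataType: str
    pa: float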
Example #9
    def testDetectors(self):
        """Test that the detector returned by the gen 2 butler is the same
        as the expected one.
        """
        for root, did, expected in zip(self.roots, self.ids, self.expecteds):
            butler = Butler(root)
            raw = butler.get("raw", dataId=did)
            for amp1, amp2 in zip(expected['detector'], raw.getDetector()):
                with self.subTest(amp=amp1.getName()):
                    self.assertEqual(amp1.getName(), amp2.getName())
                    self.assertAmpRawBBoxesEqual(amp1, amp2)
Example #10
class SimpleButlerInterface(BaseGenericCatalog):
    """
    A simple butler interface.

    Args:
    -----
    repo: str
        path to repository containing the DM-processed data.
    dataId: dict
    """
    def _subclass_init(self, repo, datasetType, dataId=None, **kwargs):

        if not _HAS_LSST_STACK:
            raise RuntimeError('LSST Stack not available')

        self._butler = Butler(repo)
        self._datasetType = datasetType
        self._dataId_cache = self._butler.subset(self._datasetType,
                                                 dataId=dataId).cache

        self._columns = None
        for dataId in self._dataId_cache:
            data = self._get_data(dataId)
            if data is not None:
                self._columns = data.schema.getNames()
                break

        if not self._columns:
            raise RuntimeError('No datasets or columns found!')

    def _get_data(self, dataId, datasetType=None):
        try:
            data = self._butler.get(datasetType or self._datasetType,
                                    dataId=dataId)
        except NoResults:
            return None
        return data

    def _generate_quantity_getter(self, dataId):
        data = self._get_data(dataId)
        if data is None:
            return
        return data.get

    def _iter_native_dataset(self, native_filters=None):
        for dataId in self._dataId_cache:
            if native_filters is None or native_filters.check_scalar(dataId):
                quantity_getter = self._generate_quantity_getter(dataId)
                if quantity_getter is not None:
                    yield quantity_getter

    def _generate_native_quantity_list(self):
        return self._columns
Example #11
    def testAssemble(self):
        """Test the assembly of E2V and ITL sensors
        """
        task = AssembleCcdTask()
        # exclude LATISS for this test since we don't have an expected output
        for root, did, expected in zip(self.roots, self.ids, self.expecteds):
            butler = Butler(root)
            raw = butler.get("raw", dataId=did)
            assembled = task.assembleCcd(raw)
            count = numpy.sum(expected['expected'].read().array -
                              assembled.getImage().array)
            self.assertEqual(count, 0)
Example #12
def load_and_save_tract(repo,
                        tract,
                        filename,
                        key_prefix='coadd',
                        patches=None,
                        overwrite=True,
                        verbose=False,
                        **kwargs):
    """Save catalogs to HDF5 from forced-photometry coadds across available filters.

    Iterates through patches, saving each in append mode to the save HDF5 file.

    Parameters
    --
    repo: str
        File location of Butler repository+rerun to load.
    tract: int
        Tract of sky region to load
    filename: str
        Filename for HDF file.
    key_prefix: str
        Base for the key in the HDF file.
        Keys will be of the form "%s_%d_%s" % (keybase, tract, patch)
        With the addition that the comma will be removed from the patch name
        to provide a valid Python identifier: e.g., 'coadd_4849_11'
    overwrite: bool
        Overwrite an existing HDF file.
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    for patch in patches:
        if verbose:
            print("Processing tract %d, patch %s" % (tract, patch))
        patch_merged_cat = load_patch(butler,
                                      tract,
                                      patch,
                                      verbose=verbose,
                                      **kwargs)
        if len(patch_merged_cat) == 0:
            if verbose:
                print("  No good entries for tract %d, patch %s" %
                      (tract, patch))
            continue

        key = '%s_%d_%s' % (key_prefix, tract, patch)
        key = valid_identifier_name(key)
        patch_merged_cat.to_pandas().to_hdf(filename, key, format='fixed')
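A hedged usage sketch; the repo path and tract are placeholders, and the key naming follows the docstring above:

load_and_save_tract('/path/to/repo', tract=4849,
                    filename='coadd_test.h5', verbose=True)

# Each patch lands under its own key, e.g. 'coadd_4849_11':
import pandas as pd
df = pd.read_hdf('coadd_test.h5', key='coadd_4849_11')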
Example #13
class ArcTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        self.butler = Butler(
            os.path.join(weeklyRerun, "calib", self.arms, f"arc_{self.arms}",
                         "detectorMap"))

    def tearDown(self):
        del self.butler

    def testResiduals(self):
        """Test that wavelength fit residuals are reasonable"""
        tolerance = {
            "b": 0.05,
            "r": 0.02,
            "m": 0.01,
            "n": 0.05,
        }  # tolerance for each arm, nm
        minSigNoise = 10  # Minimum (flux) signal-to-noise for arc lines to consider
        for arm in self.arms:
            atol = tolerance[arm]
            detMap = self.butler.get("detectorMap", visit=self.visit, arm=arm)
            lines = self.butler.get("arcLines", visit=self.visit, arm=arm)
            fitWavelength = detMap.findWavelength(lines.fiberId, lines.y)
            good = ~lines.flag & (lines.status == 0)
            sigNoise = lines.intensity / lines.intensityErr
            for fiberId in set(lines.fiberId):
                with self.subTest(arm=arm, fiberId=fiberId):
                    select = (lines.fiberId
                              == fiberId) & good & (sigNoise > minSigNoise)
                    num = select.sum()
                    self.assertGreater(num, 10)

                    residual = lines.wavelength[select] - fitWavelength[select]
                    lq, median, uq = np.percentile(residual,
                                                   (25.0, 50.0, 75.0))
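                    # 0.741 * IQR estimates sigma for a Gaussian (IQR = 1.349 sigma)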
                    robustRms = 0.741 * (uq - lq)

                    self.assertFloatsAlmostEqual(median, 0.0, atol=atol)
                    self.assertFloatsAlmostEqual(robustRms, 0.0, atol=atol)
Example #14
class FitsStorageTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        inputDir = os.path.join(ROOT, "data", "input")
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'),
                                        prefix=type(self).__name__ + '-')
        self.butler = Butler(inputs=inputDir,
                             outputs={
                                 "root": self.testDir,
                                 "mode": 'rw'
                             })

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testReadPropertyList(self):
        """Test that reading a FitsStorage dataset into a PropertyList
        is the same as using the special _md Exposure hook.
        """
        md1 = self.butler.get("raw_md", visit=1, filter="g")
        md2 = self.butler.get("rawMetadataDirect", visit=1, filter="g")
        self.assertEqual(md1, md2)
Example #15
    def testExposureIdInfo(self):
        butler = Butler(self.input)
        expIdBits = self.mapper.bypass_ccdExposureId_bits(  # args are ignored
            datasetType=None,
            pythonType=int,
            location=None,
            dataId=dict(),
        )
        for visit in (1, 2, 3):
            dataId = dict(visit=visit)
            expIdInfo = butler.get("expIdInfo", dataId=dataId)
            self.assertEqual(expIdInfo.expId, visit)
            self.assertEqual(expIdInfo.expBits, expIdBits)
            self.assertEqual(expIdInfo.maxBits, 64)
            self.assertEqual(expIdInfo.unusedBits, expIdInfo.maxBits - expIdBits)
Example #16
    def testBasics(self):
        """Test construction of a discrete sky map
        """
        butler = Butler(inputs=self.inPath,
                        outputs={
                            'root': self.outPath,
                            'mode': 'rw'
                        })
        coordList = []  # list of sky coords of all corners of all calexp
        for dataId in (
                dict(visit=1, filter="g"),
                dict(visit=2, filter="g"),
                dict(visit=3, filter="r"),
        ):
            # TODO: pybind11 remove `immediate=True` once DM-9112 is resolved
            rawImage = butler.get("raw", dataId, immediate=True)
            # fake calexp by simply copying raw data; the task just cares about its bounding box
            # (which is slightly larger for raw, but that doesn't matter for this test)
            calexp = rawImage
            butler.put(calexp, "calexp", dataId)
            calexpWcs = calexp.getWcs()
            calexpBoxD = Box2D(calexp.getBBox())
            coordList += [
                calexpWcs.pixelToSky(corner)
                for corner in calexpBoxD.getCorners()
            ]

        # use the calexp to make a sky map
        retVal = MakeDiscreteSkyMapTask.parseAndRun(
            args=[self.inPath, "--output", self.outPath, "--id", "filter=g^r"],
            config=self.config,
            doReturnResults=True,
        )
        self.assertEqual(len(retVal.resultList), 1)
        skyMap = retVal.resultList[0].result.skyMap
        self.assertEqual(type(skyMap), DiscreteSkyMap)
        self.assertEqual(len(skyMap), 1)
        tractInfo = skyMap[0]
        self.assertEqual(tractInfo.getId(), 0)
        self.assertEqual(tractInfo.getNumPatches(), Extent2I(3, 3))
        tractWcs = tractInfo.getWcs()
        tractBoxD = Box2D(tractInfo.getBBox())
        for skyPoint in coordList:
            self.assertTrue(tractBoxD.contains(tractWcs.skyToPixel(skyPoint)))
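Example #17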
    def testBasics(self):
        """Test construction of a discrete sky map
        """
        butler = Butler(inputs=self.inPath, outputs={'root': self.outPath, 'mode': 'rw'})
        coordList = []  # list of sky coords of all corners of all calexp
        for dataId in (
            dict(visit=1, filter="g"),
            dict(visit=2, filter="g"),
            dict(visit=3, filter="r"),
        ):
            rawImage = butler.get("raw", dataId)
            # fake calexp by simply copying raw data; the task just cares about its bounding box
            # (which is slightly larger for raw, but that doesn't matter for this test)
            calexp = rawImage
            butler.put(calexp, "calexp", dataId)
            calexpWcs = calexp.getWcs()
            calexpBoxD = Box2D(calexp.getBBox())
            coordList += [calexpWcs.pixelToSky(corner) for corner in calexpBoxD.getCorners()]

        # use the calexp to make a sky map
        retVal = MakeDiscreteSkyMapTask.parseAndRun(
            args=[self.inPath, "--output", self.outPath, "--id", "filter=g^r"],
            config=self.config,
            doReturnResults=True,
        )
        self.assertEqual(len(retVal.resultList), 1)
        skyMap = retVal.resultList[0].result.skyMap
        self.assertEqual(type(skyMap), DiscreteSkyMap)
        self.assertEqual(len(skyMap), 1)
        tractInfo = skyMap[0]
        self.assertEqual(tractInfo.getId(), 0)
        self.assertEqual(tractInfo.getNumPatches(), Extent2I(3, 3))
        tractWcs = tractInfo.getWcs()
        tractBoxD = Box2D(tractInfo.getBBox())
        for skyPoint in coordList:
            self.assertTrue(tractBoxD.contains(tractWcs.skyToPixel(skyPoint)))
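Example #18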
import sqlite3

import lsst.afw.image as afwImage
import lsst.afw.display as afwDisplay
import lsst.afw.cameraGeom as cameraGeom
import lsst.afw.geom as afwGeom

from lsst.daf.persistence import Butler

import create_coaddComands as ccoadd
import create_multiBandCommands as multib
import create_diaCommands as cdia
import create_assocCommands as cassoc
import create_forcedPhotCommands as cfPhot

repo = '/global/cscratch1/sd/desc/DC2/data/Run2.1i/rerun/calexp-v1'
b = Butler(repo)

skymap = b.get('deepCoadd_skyMap')

# create a 2 sq. deg rectangle for the tract/patch search
radec_NE = afwGeom.SpherePoint(58, -31, afwGeom.degrees)
radec_SE = afwGeom.SpherePoint(58, -32, afwGeom.degrees)
radec_SW = afwGeom.SpherePoint(56, -32, afwGeom.degrees)
radec_NW = afwGeom.SpherePoint(56, -31, afwGeom.degrees)
rect = [radec_NE, radec_NW, radec_SW, radec_SE]

tpatches = skymap.findTractPatchList(rect)

# to figure out the visits on these patches we need the DB;
# this will help us determine seeing and time cuts for coadds
database = repo + '/tracts_mapping.sqlite3'
query_tmpl = "select DISTINCT(visit), filter from overlaps WHERE tract={} and patch={} order by visit"
conn = sqlite3.connect(database)
Example #19
    filt = filt[-1]
    if filt in ('u', 'y') or n_visit not in list(visitab.obsHistID):
        print('continue')
        continue
    
    raft_list = glob(avisit+'/R*')
    for araft in raft_list:
        raft_n = int(os.path.basename(araft)[1:])
        #print(raft_n)
        # have visit number and raft number
        detlist = glob(araft+'/diaSrc_*.fits')
        for adet in detlist:
            det_n = int(os.path.basename(adet)[-8:-5])
            print(n_visit, filt, raft_n, det_n)

            im = b.get('calexp', visit=n_visit, detector=det_n)
            #im.getBBox().getCorners()
            image_wcs = im.getWcs()
            corners = [geom.Point2D(c) for c in im.getBBox().getCorners()]
            sky_corners = image_wcs.pixelToSky(corners)
            
            #print('we have wcs, break')
            vinfo = visitab.query(f'obsHistID=={n_visit}').values[0]
            bsight = geom.SpherePoint(vinfo[4]*geom.degrees, vinfo[5]*geom.degrees)
            orient = vinfo[10]*geom.degrees
            minionwcs = makeSkyWcs(trans[det_n], orient, flipX=True,
                                   boresight=bsight, projection='TAN')
            minion_skycorners = minionwcs.pixelToSky(corners)

            print(minion_skycorners, sky_corners)
            
Example #20
    def prep(self):
        self.prior = bfd.MomentPrior()
        priorFiles = []
        priorButler = Butler(self.config.priorRerun)
        prior_skyMap = priorButler.get('deepCoadd_skyMap')

        for tract in self.config.priorTracts:
            for patchInfo in prior_skyMap[tract]:
                patch = '%d,%d' % patchInfo.getIndex()

                if self.config.priorPatches:
                    if patch not in self.config.priorPatches:
                        continue

                if priorButler.datasetExists('deepCoadd_momentPrior',
                                             tract=tract,
                                             patch=patch,
                                             filter=self.config.priorFilter,
                                             label=self.config.priorLabel):
                    priorFiles.append(
                        priorButler.get('deepCoadd_momentPrior_filename',
                                        tract=tract,
                                        patch=patch,
                                        filter=self.config.priorFilter,
                                        label=self.config.priorLabel)[0])

        max_file = len(priorFiles)
        if self.config.maxPriorFiles > 0:
            max_file = self.config.maxPriorFiles

        first = True
        for file in priorFiles[:max_file]:
            if file.find('_parent') > 0:
                self.log.info("Skipping %s, from parent" % file)
                continue
            self.log.info("Adding prior %s" % file)
            try:
                cat = lsst.afw.table.BaseCatalog.readFits(file)
                self.prior.addCatalog(cat, self.config.invariantCovariance,
                                      self.config.sampleFraction,
                                      self.config.sampleSeed)
                # Should be same for all prior catalogs
                if first:
                    self.cov = numpy.array(
                        cat.getTable().getMetadata().getArrayDouble(
                            'COV')).reshape(6, 6)
                    first = False
            except Exception as e:
                print('Failed to read', e)
                continue

        self.prior.prepare()
        self.fluxMin = self.prior.getFluxMin()
        self.fluxMax = self.prior.getFluxMax()
        self.varMin = self.prior.getVarMin()
        self.varMax = self.prior.getVarMax()
        selectionPqr = self.prior.selectionProbability(
            self.cov.astype(numpy.float32))
        deselect = selectionPqr.copy()
        deselect[0] = 1 - selectionPqr[0]
        for i in range(1, 6):
            deselect[i] *= -1.
        self.noSelectPqr = deselect
Example #21
def load_patch(butler_or_repo,
               tract,
               patch,
               fields_to_join=('id', ),
               filters={
                   'u': 'u',
                   'g': 'g',
                   'r': 'r',
                   'i': 'i',
                   'z': 'z',
                   'y': 'y'
               },
               trim_colnames_for_fits=False,
               verbose=False):
    """Load patch catalogs.  Return merged catalog across filters.

    Parameters
    ----------
    butler_or_repo: Butler object or str
        Either a Butler object or a filename to the repo
    tract: int
        Tract in skymap
    patch: str
        Patch in the tract in the skymap
    fields_to_join: iterable of str
        Join the catalogs for each filter on these fields
    filters: dict
        Mapping from short filter keys to the filter names to load
    trim_colnames_for_fits: bool
        Trim column names to satisfy the FITS standard character limit of <68.

    Returns
    -------
    AstroPy Table of patch catalog merged across filters.
    """
    if isinstance(butler_or_repo, str):
        butler = Butler(butler_or_repo)
    else:
        butler = butler_or_repo

    # Data ID for this tract and patch.
    tract_patch_data_id = {'tract': tract, 'patch': patch}
    try:
        ref_table = butler.get(datasetType='deepCoadd_ref',
                               dataId=tract_patch_data_id).asAstropy()
    except NoResults as e:
        if verbose:
            print(" ", e)
        return Table()

    isPrimary = ref_table['detect_isPrimary']
    ref_table = ref_table[isPrimary]
    if len(ref_table) == 0:
        if verbose:
            print("  No good isPrimary entries for tract %d, patch %s" %
                  (tract, patch))
        return ref_table

    merge_filter_cats = {}
    for filt in filters:
        this_data = tract_patch_data_id.copy()
        this_data['filter'] = filters[filt]
        try:
            cat = butler.get(datasetType='deepCoadd_forced_src',
                             dataId=this_data).asAstropy()
        except NoResults as e:
            if verbose:
                print(" ", e)
            continue

        CoaddCalib = butler.get('deepCoadd_calexp_calib', this_data)
        CoaddCalib.setThrowOnNegativeFlux(False)

        mag, mag_err = CoaddCalib.getMagnitude(cat['base_PsfFlux_flux'],
                                               cat['base_PsfFlux_fluxSigma'])

        cat['mag'] = mag
        cat['mag_err'] = mag_err
        cat['SNR'] = np.abs(
            cat['base_PsfFlux_flux']) / cat['base_PsfFlux_fluxSigma']

        cat = cat[isPrimary]

        merge_filter_cats[filt] = cat

    merged_patch_cat = ref_table
    for filt in filters:
        if filt not in merge_filter_cats:
            continue

        cat = merge_filter_cats[filt]
        if len(cat) < 1:
            continue
        # Rename duplicate columns with prefix of filter
        prefix_columns(cat, filt, fields_to_skip=fields_to_join)
        # Merge metadata with concatenation
        with enable_merge_strategies(MergeNumbersAsList,
                                     MergeListNumbersAsList):
            merged_patch_cat = join(merged_patch_cat, cat, keys=fields_to_join)

    if trim_colnames_for_fits:
        # FITS column names can't be longer than 68 characters.
        # Trim here to ensure consistency across any format we write this out to.
        trim_long_colnames(merged_patch_cat)

    return merged_patch_cat
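A hedged example call; the repo and tract/patch values are placeholders, and the helpers used above (prefix_columns, trim_long_colnames, and the astropy merge strategies) are assumed to be importable in this module:

cat = load_patch('/path/to/repo', tract=4849, patch='1,1',
                 filters={'g': 'g', 'r': 'r', 'i': 'i'}, verbose=True)
print(len(cat))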
Example #22
class Taster(object):
    """
    Worker for tasting the datasets in a Butler's repo (based mostly off of querying metadata).
    Instantiate with a repo.
    """
    def __init__(self, repo, vb=False, path_to_tracts=''):
        self.repo = repo
        # Instantiate a butler, or report failure:
        from lsst.daf.persistence import Butler
        try:
            self.butler = Butler(repo)
        except Exception:
            self.butler = None
            print("Warning: failed to instantiate a butler to get data from repo '" + repo + "'")
            return
        # Set up some internal variables:
        self.vb = vb
        self.exists = {}
        self.existence = False
        self.counts = {}
        self.tracts = []
        self.path_to_tracts = path_to_tracts
        if path_to_tracts != '':
            try:
                self.skymap_butler = Butler(repo + path_to_tracts)
            except Exception:
                self.skymap_butler = None
                print("Warning: failed to find a skyMap for the path " + repo + path_to_tracts)
        return
    
    def what_exists(self, all=False):
        """
        Check for the existence of various useful things. 
        
        Parameters
        ==========
        all: boolean
            If true, the method will check all possible dataset types
        
        Returns
        =======
        exists: dict
            Checklist of what exists (True) and what does not (False)
        """
        # Get mappers for all tested repos
        from lsst.obs.hsc import HscMapper
        from lsst.obs.comCam import ComCamMapper
        #from lsst.obs.lsst import LsstCamMapper
        from lsst.obs.ctio0m9 import Ctio0m9Mapper
        
        # select the proper mapper for this repo
        if self.repo.find('hsc') != -1:
            mapper = HscMapper(root=self.repo)
        elif self.repo.find('comCam') != -1:
            mapper = ComCamMapper(root=self.repo)
        #elif self.repo.find('DC2') != -1: mapper = LsstCamMapper(root=self.repo)
        elif self.repo.find('ctio0m9') != -1:
            mapper = Ctio0m9Mapper(root=self.repo)
        else:
            print("Unable to locate Mapper file in specified repo. Check that you selected a valid repo.")

        if all:
            # collect a list of all possible dataset types from the selected mapper
            all_dataset_types = mapper.getDatasetTypes()

            remove = ['_config', '_filename', '_md', '_sub', '_len', '_schema', '_metadata']

            interesting = []
            for dataset_type in all_dataset_types:
                keep = True
                for word in remove:
                    if word in dataset_type:
                        keep = False
                if keep:
                    interesting.append(dataset_type)
        
        else: 
            interesting = ['raw', 'calexp', 'src', 'deepCoadd_calexp', 'deepCoadd_meas']
        
        self.look_for_datasets_of_type(interesting)
        self.look_for_skymap()
        self.existence = True
        return
    
    def look_for_datasets_of_type(self, datasettypes):
        """
        Check whether dataset of given type is in the metadata.
        
        Parameters
        ==========
        datasettype: list of strings
            Types of dataset to check for, eg 'calexp', 'raw', 'wcs' etc. 
        """
        datasets_that_exist = []
        datasets_that_do_not_exist = []
        
        for datasettype in datasettypes:        
            try:
                datasetkeys = self.butler.getKeys(datasettype)
                onekey = list(datasetkeys.keys())[0]
                metadata = self.butler.queryMetadata(datasettype, [onekey])
                #if self.vb: print("{} dataset exists.".format(datasettype))
                datasets_that_exist.append(datasettype)
                self.exists[datasettype] = True
            except Exception:
                #if self.vb: print("{} dataset doesn't exist.".format(datasettype))
                datasets_that_do_not_exist.append(datasettype)
                self.exists[datasettype] = False
        
        #Organize output
        if self.vb:
            print("Datasets that exist\n-------------------")
            print(datasets_that_exist)
            print("\nDatasets that do not exist\n--------------------------")
            print(datasets_that_do_not_exist)
            
        return
    
    def look_for_skymap(self):
        """
        Check for the existence of a skymap. 
        """
        try:
            self.skyMap = self.skymap_butler.get('deepCoadd_skyMap')
            self.exists['deepCoadd_skyMap'] = True
            if self.vb: print("\nSkymap\n-------------------\ndeepCoadd_skyMap exists.")
        except Exception:
            self.skyMap = None
            self.exists['deepCoadd_skyMap'] = False
            if self.vb: print("\nSkymap\n-------------------\ndeepCoadd_skyMap doesn't exist.")
        return
    def estimate_sky_area(self):
        """
        Use available skymap to estimate sky area covered by tracts and patches.
        
        Returns
        =======
        area: float
            Sky area in square degrees
        """
        if self.skyMap is None: return None
        
        area_label = 'Total Sky Area (deg$^2$)'
        if area_label in self.counts.keys():
            return self.counts[area_label]
        
        # Collect tracts from files
        import os, glob
        import numpy as np
        tracts = sorted([int(os.path.basename(x)) for x in
                 glob.glob(os.path.join(self.repo + self.path_to_tracts, 'deepCoadd-results', 'merged', '*'))])
        
        self.tracts = tracts
        self.counts['Number of Tracts'] = len(tracts)
        
        # Note: We'd like to do this with the butler, but it appears 'tracts' have to be
        #       specified in the dataId to be queried, so the queryMetadata method fails

        # Calculate area from all tracts
        total_area = 0.0  #deg^2
        plotting_vertices = []
        for test_tract in tracts:
            # Get inner vertices for tract
            tractInfo = self.skyMap[test_tract]
            vertices = tractInfo._vertexCoordList
            plotting_vertices.append(vertices)

            # Calculate area of box
            av_dec = 0.5 * (vertices[2][1] + vertices[0][1])
            av_dec = av_dec.asRadians()
            delta_ra_raw = vertices[0][0] - vertices[1][0] 
            delta_ra = delta_ra_raw.asDegrees() * np.cos(av_dec)
            delta_dec = vertices[2][1] - vertices[0][1]
            area = delta_ra * delta_dec.asDegrees()

            # Combine areas
            total_area += area

        if self.vb: print(area_label, ": ", total_area)

        # Round off the total area for table purposes
        self.counts[area_label] = round(total_area, 2)
        return self.counts[area_label]

    def count_things(self):
        """
        Count the available number of calexp visits, sensors, fields etc.
        """
        # Collect numbers of images of various kinds:
        if self.exists['calexp']:
            self.counts['Number of Visits'] = \
                len(self.butler.queryMetadata('calexp', ['visit']))
            self.counts['Number of Pointings'] = \
                len(self.butler.queryMetadata('calexp', ['pointing']))
            self.counts['Number of Sensor Visits'] = \
                len(self.butler.queryMetadata('calexp', ['ccd']))
            self.counts['Number of Fields'] = \
                len(self.butler.queryMetadata('calexp', ['field']))
            self.counts['Number of Filters'] = \
                len(self.butler.queryMetadata('calexp', ['filter']))
        # Collect number of objects from Source Catalog
        if self.exists['src']:
            self.counts['Number of Sources'] = \
                len(self.butler.queryMetadata('src', ['id']))
        return
    
    def plot_sky_coverage(self):
        import matplotlib.pyplot as plt
        fig = plt.figure()

        for tract in self.tracts:
            tractInfo = self.skyMap[tract]

            corners = [(x[0].asDegrees(), x[1].asDegrees()) for x in tractInfo.getVertexList()]
            x = [k[0] for k in corners] + [corners[0][0]]
            y = [k[1] for k in corners] + [corners[0][1]]

            plt.plot(x, y, color='b')

        plt.xlabel('RA (deg)')
        plt.ylabel('Dec (deg)')
        plt.title('2D Projection of Sky Coverage')

        plt.show()
        return 

    
    def report(self):
        """
        Print a nice report of the data available in this repo.
        """
        from IPython.display import display, Markdown

        # First check what's there:
        if not self.existence: self.what_exists()
        
        # Then, get the numbers:
        self.count_things()
        self.estimate_sky_area()
        
        # A nice bold section heading:
        display(Markdown('### Main Repo: %s' % self.repo))
        if self.path_to_tracts != '':
            display(Markdown('### Specified Tract Directory: %s' %self.path_to_tracts))

        # Make a table of the collected metadata
        output_table = "|   Metadata Characteristics  |  | \n  | :---: | --- | \n "
        for key in self.counts.keys():
            output_table += "| %s |  %s | \n" %(key, self.counts[key])
        
        # Display it:
        display(Markdown(output_table))
        
        # Plot sky coverage
        self.plot_sky_coverage()

        return
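A short, hypothetical driving session for the class above; the repo path is a placeholder, and report() assumes a notebook context because of the Markdown display:

taster = Taster('/path/to/hsc/repo', vb=True, path_to_tracts='/rerun/coadd')
taster.what_exists()   # check the common dataset types
taster.report()        # table of counts plus a sky-coverage plot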
Example #23
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy.table import vstack

from lsst.afw.geom import makeSkyWcs
from lsst.daf.persistence import Butler
from lsst.obs.lsst.imsim import ImsimMapper


template_repo = '/global/cscratch1/sd/bos0109/templates_rect'
diarepo = template_repo + '/rerun/diff_rect'
assocrepo = diarepo + '/rerun/assoc_secrun'
forcerepo = assocrepo + '/rerun/forcedPhot' 
tmprepo = template_repo + '/rerun/multiband'

diabutler = Butler(forcerepo)
skymap = diabutler.get('deepCoadd_skyMap')

truth_lightc = pd.read_csv('./lightcurves/lightcurves_cat_rect_58.0_56.0_-31.0_-32.0.csv')
sntab = pd.read_csv('./catalogs+tables/supernovae_cat_rect_58.0_56.0_-31.0_-32.0.csv')
#truth_lightc = pd.read_csv('./lightcurves/lightcurves_cat_rect_58_56_-31_-32.csv')
#sntab = pd.read_csv('./catalogs+tables/supernovae_cat_rect_58_56_-31_-32.csv')

#diaSrc_store = pd.HDFStore('/global/cscratch1/sd/bos0109/diaSrc_forced_fulltables_v4.h5')
diaSrc_store = pd.HDFStore('/global/homes/b/bos0109/run2_diaproc/results/diaSrc_secrun_fulltables_v4.h5')
diaSrc_store.open()
metacols = ['id', 'visit', 'filter', 'raftName', 'detectorName', 'detector']

#region ------------------------------------------------------------------------
## =================== Convenient functions ================================= ##
def get_truth_LC(truth_tab, snid):
    sffx = ('_observable', '_observed', '_flux', '_fluxErr', '_mag', '_magErr')
Example #24
class ButlerWrapper(object):

    def __init__(self, inputs, outputs=None):
        """Initialize the butler wrapper class.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)

    def setInputsAndOutputs(self, inputs, outputs=None):
        """Set the inputs and outputs of butler.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)

    def getRawExp(self, visit, raft, sensor, snap=None):
        """Get the raw exposure.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        snap : int, optional
            Snap time (0 or 1), meaning the first or second exposure. (the
            default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Raw exposure object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        if (snap is not None) and isinstance(snap, (int, float)):
            dataId["snap"] = int(snap)

        return self._butler.get("raw", dataId=dataId)

    def _getDefaultDataId(self, visit, raft, sensor):
        """Get the default data Id.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").

        Returns
        -------
        dict
            Default data Id.
        """

        dataId = dict(visit=int(visit), raftName=raft, detectorName=sensor)

        return dataId

    def getPostIsrCcd(self, visit, raft, sensor, afilter=None):
        """Get the post-ISR CCD exposure.

        ISR: Instrument signature removal.
        CCD: Charge-coupled device.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        afilter : str, optional
            Active filter ("u", "g", "r", "i", "z", "y") (the default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Post-ISR CCD object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        if (afilter is not None) and isinstance(afilter, str):
            dataId["filter"] = afilter

        return self._butler.get("postISRCCD", dataId=dataId)

    @staticmethod
    def getImageData(exposure):
        """Get the image data.

        Parameters
        ----------
        exposure : lsst.afw.image.exposure.exposure.ExposureF
            Exposure object.

        Returns
        -------
        numpy.ndarray
            Image data.
        """

        # Get the numpy array data based on the input object type
        if isinstance(exposure, np.ndarray):
            data = exposure
        elif hasattr(exposure, "getMaskedImage"):
            data = exposure.getMaskedImage().getImage().getArray()
        elif hasattr(exposure, "getImage"):
            data = exposure.getImage().getArray()
        else:
            data = exposure.getArray()

        # Return the data in numpy array
        return data
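A hedged usage sketch for the wrapper; the repository path and data ID values are placeholders:

wrapper = ButlerWrapper('/path/to/input_repo')
raw = wrapper.getRawExp(visit=9006001, raft='R22', sensor='S11')
data = ButlerWrapper.getImageData(raw)
print(data.shape)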
Example #25
from lsst.daf.persistence import Butler
import lsst.afw.detection as afwDetection
import lsst.afw.geom as afwGeom
import lsst.geom as geom
import bfd
import galsim
import numpy as np

b = Butler('/datasets/hsc/repo/rerun/RC/w_2018_24/DM-14688/')
#exposure =b.get('deepCoadd_calexp',tract=9813,patch='4,4',filter='HSC-I')
#meas = b.get('deepCoadd_meas',tract=9813,patch='4,4',filter='HSC-I')

exposure = b.get('calexp', visit=1228, ccd=49)
meas = b.get('src', visit=1228, ccd=49)

ds = []
for ii in range(1000):
    sns = []
    sigmas = np.arange(0.2, 2, 0.1)
    for i in sigmas:
        weight = bfd.KSigmaWeight(4, i)

        measRecord = meas[ii]

        center = measRecord.getCentroid()

        orig_box = measRecord.getFootprint().getBBox()
        w, h = orig_box.getWidth(), orig_box.getHeight()
        box_size = max(w, h)
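        # round the cutout box size up to an even number of pixels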
        if box_size % 2 == 1:
            box_size += 1
Example #26
    def prep(self):

        if self.initialized:
            return

        self.prior = None
        priorFiles = []
        priorButler = Butler(self.config.priorRerun)
        prior_skyMap = priorButler.get('deepCoadd_skyMap')

        for tract in self.config.priorTracts:
            for patchInfo in prior_skyMap[tract]:
                patch = '%d,%d' % patchInfo.getIndex()

                if self.config.priorPatches:
                    if patch not in self.config.priorPatches:
                        continue

                if priorButler.datasetExists('deepCoadd_prior', tract=tract, patch=patch,
                                             filter=self.config.priorFilter, label=self.config.priorLabel):
                    priorFiles.append(priorButler.getUri('deepCoadd_prior',
                                                         tract=tract, patch=patch,
                                                         filter=self.config.priorFilter,
                                                         label=self.config.priorLabel))

        max_file = len(priorFiles)
        if self.config.maxPriorFiles > 0:
            max_file = self.config.maxPriorFiles

        self.zBin = None
        for file in priorFiles[:max_file]:
            if file.find('_parent') > 0:
                self.log.info("Skipping %s, from parent" % file)
                continue
            self.log.info("Adding prior %s" % file)
            try:
                cat = afwTable.BaseCatalog.readFits(file)
                md = cat.getTable().getMetadata().toDict()

                if self.prior is None:
                    self.fluxMin = md['FLUXMIN']
                    self.fluxMax = md['FLUXMAX']
                    self.varMin = md['VARMIN']
                    self.varMax = md['VARMAX']
                    cov_even = np.array(md['COV_EVEN'])
                    cov_odd = np.array(md['COV_ODD'])
                    self.zMax = md['ZMAXCUT']
                    self.zMin = md['ZMINCUT']
                    self.noiseFactor = md['noiseFactor']
                    self.priorSigmaCutoff = md['priorSigmaCutoff']
                    self.priorSigmaStep = md['priorSigmaStep']
                    self.priorSigmaBuffer = md['priorSigmaBuffer']
                    self.nSample = md['NSAMPLE']
                    self.selectionOnly = md['selectionOnly']
                    self.invariantCovariance = md['invariantCovariance']

                    covMat = self.bfd.MomentCov(cov_even.reshape(self.n_even, self.n_even),
                                                cov_odd.reshape(self.n_odd, self.n_odd))
                    self.prior = self.bfd.KDTreePrior(self.fluxMin, self.fluxMax, covMat, self.ud,
                                                      self.nSample, self.selectionOnly,
                                                      self.noiseFactor, self.priorSigmaStep,
                                                      self.priorSigmaCutoff, self.priorSigmaBuffer,
                                                      self.invariantCovariance)
                else:
                    fluxMin = md['FLUXMIN']
                    fluxMax = md['FLUXMAX']
                    varMin = md['VARMIN']
                    varMax = md['VARMAX']
                    cov_even = np.array(md['COV_EVEN'])
                    cov_odd = np.array(md['COV_ODD'])
                    zMax = md['ZMAXCUT']
                    zMin = md['ZMINCUT']
                    noiseFactor = md['noiseFactor']
                    priorSigmaCutoff = md['priorSigmaCutoff']
                    priorSigmaStep = md['priorSigmaStep']
                    priorSigmaBuffer = md['priorSigmaBuffer']
                    nSample = md['NSAMPLE']
                    selectionOnly = md['selectionOnly']
                    invariantCovariance = md['invariantCovariance']

                    mismatch = False
                    if fluxMin != self.fluxMin:
                        self.log.info('does not match fluxMin')
                        mismatch = True
                    if fluxMax != self.fluxMax:
                        self.log.info('does not match fluxMax')
                        mismatch = True
                    if varMin != self.varMin:
                        self.log.info('does not match varMin')
                        mismatch = True
                    if varMax != self.varMax:
                        self.log.info('does not match varMax')
                        mismatch = True
                    if zMin != self.zMin:
                        self.log.info('does not match zMin')
                        mismatch = True
                    if zMax != self.zMax:
                        self.log.info('does not match zMax')
                        mismatch = True
                    if noiseFactor != self.noiseFactor:
                        self.log.info('does not match noiseFactor')
                        mismatch = True
                    if priorSigmaBuffer != self.priorSigmaBuffer:
                        self.log.info('does not match priorSigmaBuffer')
                        mismatch = True
                    if priorSigmaStep != self.priorSigmaStep:
                        self.log.info('does not match priorSigmaStep')
                        mismatch = True
                    if priorSigmaCutoff != self.priorSigmaCutoff:
                        self.log.info('does not match priorSigmaCutoff')
                        mismatch = True
                    if nSample != self.nSample:
                        self.log.info('does not match nSample')
                        mismatch = True
                    if selectionOnly != self.selectionOnly:
                        self.log.info('does not match selectionOnly')
                        mismatch = True
                    if invariantCovariance != self.invariantCovariance:
                        self.log.info('does not match invariantCovariance')
                        mismatch = True

                    if mismatch:
                        self.log.info('Skipping %s' % file)
                        continue

                for s in cat:
                    ti = self.bfd.TemplateInfo()
                    ti.m = s.get('m')
                    ti.dm = s.get('dm').reshape(self.bfd.BFDConfig.MSIZE, self.bfd.BFDConfig.DSIZE)
                    ti.dxy = s.get('dxy').reshape(self.bfd.BFDConfig.XYSIZE, self.bfd.BFDConfig.DSIZE)
                    ti.nda = s.get('nda')
                    ti.id = s.get('bfd_id')

                    self.prior.addTemplateInfo(ti)

            except Exception as e:
                print('Failed to read', e)
                continue
        self.prior.prepare()
        self.initialized = True
Example #27
import pandas as pd

from lsst.daf.persistence import Butler
from importlib import reload

import lsst.afw.geom as afwGeom
import lsst.afw.cameraGeom
import lsst.geom as geom
from lsst.afw.geom import makeSkyWcs
from lsst.obs.lsst.imsim import ImsimMapper

from collections import OrderedDict as Odict

import dm_utilities as dmu

calexprepo = '/global/cscratch1/sd/desc/DC2/data/Run2.1i/rerun/calexp-v1'
b = Butler(calexprepo)
skymap = b.get('deepCoadd_skyMap')

template_repo = '/global/cscratch1/sd/bos0109/templates_rect'
tmprepo = template_repo + '/rerun/multiband'

diarepo = template_repo + '/rerun/diff_rect'
assocrepo = diarepo + '/rerun/assoc_thirrun'
forcerepo = assocrepo + '/rerun/forcedPhot'

diabutler = Butler(forcerepo)

truth_lightc = pd.read_csv(
    './lightcurves/lightcurves_cat_rect_58.0_56.0_-31.0_-32.0.csv')
#sntab = pd.read_csv('./catalogs+tables/supernovae_cat_rect_58_56_-31_-32.csv')
sntab = pd.read_csv('./results/sntab_matched.csv')
snlcs = pd.read_csv('lightcurves/sn_matched_lcs.csv')
Example #28
import numpy as np
from matplotlib import pyplot as plt
plt.ion()

from lsst.daf.persistence import Butler

butler = Butler('/datasets/hsc/repo/rerun/RC/w_2018_10/DM-13647/')

# Get a particular calexp
# In order to find this file, I had to look through the directory structure
# $(pointing)/$(filter)/corr/CORR-$(visit)-$(ccd).fits

#subset = butler.subset('calexp', **{'ccd':95, 'pointing':1111, 'filter':'HSC-Z', 'visit':17962})
subset = butler.subset('calexp', **{'ccd':95, 'filter':'HSC-Z', 'visit':17962})
calexp = butler.get('calexp', **subset.cache[0])
src = butler.get('src', **subset.cache[0])

# Image
plt.figure(figsize=(5, 8))
plt.imshow(calexp.getImage().array, 
           vmin=np.percentile(calexp.getImage().array, 10), 
           vmax=np.percentile(calexp.getImage().array, 90))
plt.colorbar()
plt.scatter(src.getX(), src.getY(), c='red', s=2)

# Variance
plt.figure(figsize=(5, 8))
plt.imshow(calexp.getVariance().array,
           vmin=np.percentile(calexp.getVariance().array, 10),
           vmax=np.percentile(calexp.getVariance().array, 90))
Example #29
from glob import glob

import matplotlib.pyplot as plt
import pandas as pd

from astropy import time
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import vstack

from lsst.afw.geom import makeSkyWcs
from lsst.daf.persistence import Butler
from lsst.obs.lsst.imsim import ImsimMapper

calexprepo = '/global/cscratch1/sd/desc/DC2/data/Run2.1i/rerun/calexp-v1' 
b = Butler(calexprepo)
skymap = b.get('deepCoadd_skyMap')

template_repo = '/global/cscratch1/sd/bos0109/templates_rect'
diarepo = template_repo + '/rerun/diff_rect'
assocrepo = diarepo + '/rerun/assoc_thirrun'
forcerepo = assocrepo + '/rerun/forcedPhot' 
tmprepo = template_repo + '/rerun/multiband'

diabutler = Butler(forcerepo)

truth_lightc = pd.read_csv('./lightcurves/lightcurves_cat_rect_58.0_56.0_-31.0_-32.0.csv')
sntab = pd.read_csv('./catalogs+tables/supernovae_cat_rect_58.0_56.0_-31.0_-32.0.csv')

#region --------- we are going to clean the tables using the visits in forced repo
files = glob(diarepo+'/deepDiff/*')
visitn = []
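The loop that fills visitn is cut off here; a minimal sketch of the likely intent, assuming the gen2 deepDiff directories follow the usual v<visit>-f<filter> naming (an assumption, not confirmed by the original):

import re

for f in files:
    match = re.search(r'v(\d+)-f', f)   # e.g. .../deepDiff/v00431306-fr (assumed layout)
    if match:
        visitn.append(int(match.group(1)))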
Example #30
import lsst.afw.table as afwTable

from lsst.daf.persistence import Butler
from lsst.meas.base import measurementInvestigationLib as mil

repoPath = '/datasets/hsc/repo/rerun/private/nate/shapeTest2'
tract = 9813
filt = "HSC-I"
patches = ['{},{}'.format(x, y) for x in range(4, 7) for y in range(4, 7)]
repo = Butler(repoPath)

images = []
catalogs = []

for patch in patches:
    dataId = {'tract': tract, 'patch': patch, 'filter': filt}
    tmpIm = repo.get('deepCoadd_calexp', dataId)
    tmpCat = repo.get('deepCoadd_meas', dataId)
    images.append(tmpIm)
    catalogs.append(tmpCat)

count = 0
for cat in catalogs:
    count += len(cat)

# Preallocate one contiguous catalog large enough to hold every patch catalog
totalCatalog = afwTable.SourceCatalog(catalogs[0].schema)
totalCatalog.reserve(count)
imageId = []
for n in range(len(catalogs)):
    for record in catalogs[n]:
        tmpRecord = totalCatalog.addNew()
        tmpRecord.assign(record)
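Because reserve() preallocated space for every record, the copied catalog stays contiguous, so it supports column access and converts cleanly; a quick check:

assert totalCatalog.isContiguous()        # contiguous catalogs allow column access
merged_table = totalCatalog.asAstropy()   # and convert directly to an AstroPy table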
Example #31
import numpy as np

from lsst.daf.persistence import Butler


class ButlerWrapper(object):
    def __init__(self, inputs, outputs=None):
        """Initialize the butler wrapper class.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)

    def setInputsAndOutputs(self, inputs, outputs=None):
        """Set the inputs and outputs of butler.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)

    def getRawExp(self, visit, raft, sensor, snap=None):
        """Get the raw exposure.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        snap : int, optional
            Snap index (0 or 1), i.e. the first or second exposure. (the
            default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Raw exposure object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        self._extendDataId(dataId, snap=snap)

        return self._butler.get("raw", dataId=dataId)

    def _getDefaultDataId(self, visit, raft, sensor):
        """Get the default data Id.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").

        Returns
        -------
        dict
            Default data Id.
        """

        dataId = dict(expId=int(visit), raftName=raft, detectorName=sensor)

        return dataId

    def getPostIsrCcd(self, visit, raft, sensor, aFilter=None):
        """Get the post-ISR CCD exposure.

        ISR: Instrument signature removal.
        CCD: Charge-coupled device.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        aFilter : str, optional
            Active filter ("u", "g", "r", "i", "z", "y") (the default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Post-ISR CCD object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        self._extendDataId(dataId, aFilter=aFilter)

        return self._butler.get("postISRCCD", dataId=dataId)

    def _extendDataId(self, dataId, snap=None, aFilter=None):
        """Extend the data Id.

        Parameters
        ----------
        dataId : dict
            Data Id.
        snap : int, optional
            Snap index (0 or 1), i.e. the first or second exposure. (the
            default is None.)
        aFilter : str, optional
            Active filter ("u", "g", "r", "i", "z", "y") (the default is None.)
        """

        if (snap is not None) and isinstance(snap, (int, float)):
            dataId["snap"] = int(snap)

        if (aFilter is not None) and isinstance(aFilter, str):
            dataId["filter"] = aFilter

    def getEimage(self, visit, raft, sensor, snap=None):
        """Get the PhoSim eimage exposure.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        snap : int, optional
            Snap index (0 or 1), i.e. the first or second exposure. (the
            default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Eimage exposure object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        self._extendDataId(dataId, snap=snap)

        return self._butler.get("eimage", dataId=dataId)

    @staticmethod
    def getImageData(exposure):
        """Get the image data.

        Parameters
        ----------
        exposure : lsst.afw.image.exposure.exposure.ExposureF
            Exposure object.

        Returns
        -------
        numpy.ndarray
            Image data.
        """

        # Get the numpy array data based on the input object type
        if isinstance(exposure, np.ndarray):
            data = exposure
        elif hasattr(exposure, "getMaskedImage"):
            data = exposure.getMaskedImage().getImage().getArray()
        elif hasattr(exposure, "getImage"):
            data = exposure.getImage().getArray()
        else:
            data = exposure.getArray()

        # Return the data in numpy array
        return data
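A minimal usage sketch for the wrapper above; the repository path, visit, raft, and sensor values are placeholders:

wrapper = ButlerWrapper('/path/to/butler/repo')              # hypothetical repo path
postIsrCcd = wrapper.getPostIsrCcd(9006001, 'R22', 'S11')    # placeholder data Id
image = ButlerWrapper.getImageData(postIsrCcd)
print(image.shape)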
Example #32

import numpy as np

import lsst.afw.geom as afwGeom
from lsst.daf.persistence import Butler

# `read_cats` and `rerun` are defined earlier in the original source.
field, tract = 'PSNJ07250042+2347030', 21

J_cat, H_cat, ref_table = read_cats(field, tract=tract, repo=rerun)

snr_threshold = 5
good_color = (J_cat['J_SNR'] > snr_threshold) & (H_cat['H_SNR'] > snr_threshold)

J_cat = J_cat[good_color]
H_cat = H_cat[good_color]
ref_table = ref_table[good_color]

butler = Butler(rerun)

dId = {'field': field, 'filter': 'H', 'tract': tract, 'patch': '0,0'}
calexp = butler.get('deepCoadd', dataId=dId)


# Find PSNJ07250042+2347030 based on RA, Dec: 07:25:00.408 +23:47:03.15 (J2000)

sn_RA, sn_Dec = 111.251700, 23.784208

sn_coord = afwGeom.SpherePoint(sn_RA, sn_Dec, afwGeom.degrees)

distList = []
for s in ref_table:
    this_coord = afwGeom.SpherePoint(s['coord_ra'], s['coord_dec'], afwGeom.radians)
    angSep = sn_coord.separation(this_coord)
    distList.append(angSep.asArcseconds())  # store plain floats rather than afw Angle objects

distance = np.array(distList)
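With the separations stored as arcseconds, the nearest catalog entry is one argmin away; the 1-arcsecond match radius below is illustrative:

nearest = np.argmin(distance)
if distance[nearest] < 1.0:       # illustrative 1" match radius
    sn_row = ref_table[nearest]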
Example #33
import matplotlib.pyplot as plt
import pandas as pd

from astropy import time
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import vstack

from lsst.afw.geom import makeSkyWcs
from lsst.daf.persistence import Butler
from lsst.obs.lsst.imsim import ImsimMapper

calexprepo = '/global/cscratch1/sd/desc/DC2/data/Run2.1i/rerun/calexp-v1' 
b = Butler(calexprepo)
skymap = b.get('deepCoadd_skyMap')

template_repo = '/global/cscratch1/sd/bos0109/templates_rect'
tmprepo = template_repo + '/rerun/multiband'

diarepo = template_repo + '/rerun/diff_rect'
assocrepo = diarepo + '/rerun/assoc_thirrun'
forcerepo = assocrepo + '/rerun/forcedPhot' 

diabutler = Butler(forcerepo)

#truth_lightc = pd.read_csv('./lightcurves/lightcurves_cat_rect_58.0_56.0_-31.0_-32.0.csv')
#sntab = pd.read_csv('./catalogs+tables/supernovae_cat_rect_58.0_56.0_-31.0_-32.0.csv')
# SCRATCH is assumed to be defined earlier (e.g. the NERSC $SCRATCH path).
store = f'{SCRATCH}/results/diaSrc_thirrun_fulltables_v1.h5'
diaSrc_store = pd.HDFStore(store)
diaSrc_store.open()
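Before pulling tables out, it helps to list what the store actually contains; the exact keys depend on how the store was written, so the commented lookup is illustrative:

print(diaSrc_store.keys())       # list the tables stored in the HDF5 file
# df = diaSrc_store['some/key']  # hypothetical key; loads one table
# diaSrc_store.close()           # close when done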
Example #34
    def runIsr(self):
        """
        this method takes the amplifier images from phosim, runs isr, and writes the new
        post-isr e-images with '_isr' appended to the filename.
        """
        outputDir = os.path.join(self.imageDir,
                                 'iter{}'.format(str(self.iIter)))
        flatsDir = os.path.join(self.aosSrcDir, '..', 'data', 'flats')
        repackagedDir = os.path.join(outputDir, 'repackaged')
        butlerDir = os.path.join(outputDir, 'butler')
        postISRDir = os.path.join(butlerDir, 'rerun', 'run1')

        if not os.path.exists(flatsDir):
            cwd = os.getcwd()
            os.mkdir(flatsDir)
            os.chdir(flatsDir)
            runProgram(
                'makeGainImages.py --detector_list R00_S22 R40_S02 R04_S20 R44_S00',
                verbose=True)
            os.chdir(cwd)

        if os.path.exists(repackagedDir):
            shutil.rmtree(repackagedDir)
        os.mkdir(repackagedDir)
        runProgram('phosim_repackager.py {} --out_dir {}'.format(
            outputDir, repackagedDir),
                   verbose=True)

        if os.path.exists(butlerDir):
            shutil.rmtree(butlerDir)
        os.mkdir(butlerDir)
        runProgram(
            'echo lsst.obs.lsst.phosim.PhosimMapper > {}/_mapper'.format(
                butlerDir),
            verbose=True)

        runProgram(
            'ingestCalibs.py {} {}/* --validity 9999 --output {} --mode copy'.
            format(butlerDir, flatsDir, butlerDir),
            verbose=True)
        runProgram('ingestImages.py {} {}/*.fits --clobber-config'.format(
            butlerDir, repackagedDir),
                   verbose=True)
        runProgram('runIsr.py {} --id --rerun run1 --config isr.doBias=False isr.doDark=False '\
                   'isr.doFlat=True isr.doFringe=False --clobber-config'.format(butlerDir),
                   verbose=True)

        # We don't want to import LSST stack dependencies unless we have to;
        # after the first import, subsequent imports are fast.
        from lsst.daf.persistence import Butler

        butler = Butler(postISRDir)

        pattern = re.compile(
            r'lsst_e_(\d+)_f\d_(R\d{2})_(S\d{2})_(C\d)_E(\d{3})\.fits')

        for fname in os.listdir(outputDir):
            match = pattern.match(fname)
            if match:
                visit, raft, sensor, chip, snap = match.groups()

                dataId = {
                    'raftName': raft,
                    'visit': int(visit),
                    'detectorName': sensor,
                    'snap': int(snap)
                }
                data = butler.get('postISRCCD', dataId)
                img = data.getImage().getArray().transpose()

                # Each amplifier file covers half of the e-image; the
                # 2000-pixel split is assumed from the sensor geometry.
                if 'C0' in fname:
                    img = img[:, :2000]
                else:
                    img = img[:, 2000:]

                fitsIn = os.path.join(outputDir, fname)
                fitsOut = os.path.join(outputDir,
                                       '{}_isr.fits'.format(fname[:-5]))

                with fits.open(fitsIn) as hdul:
                    fitsPrimary = hdul[0]
                    fitsPrimary.data = img
                    fitsPrimary.writeto(fitsOut, overwrite=True)

        # butlerDir is ~ 5GB
        shutil.rmtree(butlerDir)
        shutil.rmtree(repackagedDir)
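A quick sanity check on one of the '_isr' outputs; the filename below is a placeholder for a file written by the loop above:

from astropy.io import fits

# Placeholder filename; any of the '_isr.fits' files written above would do.
with fits.open('lsst_e_9006001_f1_R00_S22_C0_E000_isr.fits') as hdul:
    print(hdul[0].data.shape)   # expect a trimmed half e-image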
Example #35
def load_patch(butler_or_repo,
               tract,
               patch,
               fields_to_join=('id', ),
               filters={
                   'u': 'u',
                   'g': 'g',
                   'r': 'r',
                   'i': 'i',
                   'z': 'z',
                   'y': 'y'
               },
               trim_colnames_for_fits=False,
               verbose=False,
               debug=False):
    """Load patch catalogs.  Return merged catalog across filters.

    butler_or_repo: Butler object or str
        Either a Butler object or a filename to the repo
    tract: int
        Tract in skymap
    patch: str
        Patch in the tract in the skymap
    fields_to_join: iterable of str
        Join the catalogs for each filter on these fields
    filters: iterable of str
        Filter names to load
    trim_colnames_for_fits: bool
        Trim column names to satisfy the FITS standard character limit of <68.

    Returns
    --
    Pandas DataFrame of patch catalog merged across filters.
    """
    if isinstance(butler_or_repo, str):
        butler = Butler(butler_or_repo)
    else:
        butler = butler_or_repo

    # Build the tract+patch data Id shared by every filter
    tract_patch_data_id = {'tract': tract, 'patch': patch}
    try:
        ref_table = butler.get(datasetType='deepCoadd_ref',
                               dataId=tract_patch_data_id)
        ref_table = ref_table.asAstropy().to_pandas()
    except NoResults as e:
        if verbose:
            print(" ", e)
        return pd.DataFrame()

    isPrimary = ref_table['detect_isPrimary']
    ref_table = ref_table[isPrimary]
    if len(ref_table) == 0:
        if verbose:
            print("  No good isPrimary entries for tract %d, patch %s" %
                  (tract, patch))
        return ref_table

    flux_field_names_per_schema_version = {
        1: {
            'psf_flux': 'base_PsfFlux_flux',
            'psf_flux_err': 'base_PsfFlux_fluxSigma',
            'modelfit_flux': 'modelfit_CModel_flux',
            'modelfit_flux_err': 'modelfit_CModel_fluxSigma'
        },
        2: {
            'psf_flux': 'base_PsfFlux_flux',
            'psf_flux_err': 'base_PsfFlux_fluxErr',
            'modelfit_flux': 'modelfit_CModel_flux',
            'modelfit_flux_err': 'modelfit_CModel_fluxErr'
        },
        3: {
            'psf_flux': 'base_PsfFlux_instFlux',
            'psf_flux_err': 'base_PsfFlux_instFluxErr',
            'modelfit_flux': 'modelfit_CModel_instFlux',
            'modelfit_flux_err': 'modelfit_CModel_instFluxErr'
        },
    }

    merge_filter_cats = {}
    for filt in filters:
        this_data = tract_patch_data_id.copy()
        this_data['filter'] = filters[filt]
        try:
            cat = butler.get(datasetType='deepCoadd_forced_src',
                             dataId=this_data)
        except NoResults as e:
            if verbose:
                print(" ", e)
            continue

        if debug:
            print("AFW photometry catalog schema version: {}".format(
                cat.schema.VERSION))
        flux_names = flux_field_names_per_schema_version[cat.schema.VERSION]

        # Convert the AFW table to a Pandas DataFrame via AstroPy,
        # because it's much easier to add columns to an AstroPy table
        # than it is to set up a new schema for an AFW table.
        # Converting AFW -> AstroPy -> Pandas per catalog (rather than once
        # after merging) is an attempt to avoid an extra in-memory copy;
        # the join across filters then happens in Pandas.
        cat = cat.asAstropy().to_pandas()

        calib = butler.get('deepCoadd_calexp_calib', this_data)
        calib.setThrowOnNegativeFlux(False)

        mag, mag_err = calib.getMagnitude(
            cat[flux_names['psf_flux']].values,
            cat[flux_names['psf_flux_err']].values)

        cat['mag'] = mag
        cat['mag_err'] = mag_err
        cat['SNR'] = np.abs(cat[flux_names['psf_flux']] /
                            cat[flux_names['psf_flux_err']])

        modelfit_mag, modelfit_mag_err = calib.getMagnitude(
            cat[flux_names['modelfit_flux']].values,
            cat[flux_names['modelfit_flux_err']].values)

        cat['modelfit_mag'] = modelfit_mag
        cat['modelfit_mag_err'] = modelfit_mag_err
        cat['modelfit_SNR'] = np.abs(cat[flux_names['modelfit_flux']] /
                                     cat[flux_names['modelfit_flux_err']])

        cat = cat[isPrimary]

        merge_filter_cats[filt] = cat

    merged_patch_cat = ref_table
    for filt in filters:
        if filt not in merge_filter_cats:
            continue

        cat = merge_filter_cats[filt]
        if len(cat) < 1:
            continue
        # Rename duplicate columns with a prefix of the filter name
        prefix_columns(cat, filt, fields_to_skip=fields_to_join)
        # Join this filter's catalog onto the merged catalog on the join fields
        merged_patch_cat = pd.merge(merged_patch_cat,
                                    cat,
                                    on=fields_to_join,
                                    sort=False)

    if trim_colnames_for_fits:
        # FITS column names can't be longer than 68 characters.
        # Trim here to ensure consistency across any format we write this out to.
        trim_long_colnames(merged_patch_cat)

    return merged_patch_cat
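A minimal usage sketch for load_patch; the repo path, tract, and patch values below are placeholders:

df = load_patch('/path/to/repo/rerun/coadd', 4849, '1,1', verbose=True)
print(len(df), 'isPrimary sources merged across filters')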