Example No. 1
 def setUp(self):
     self.butler = Butler(
         os.path.join(weeklyRerun, "pipeline", self.configuration,
                      "pipeline"))
     self.visits = dict(brn=getBrnVisits,
                        bmn=getBmnVisits)[self.configuration]()
     self.design = PfsDesign.read(1, weeklyRaw)
Example No. 2
def loadAllTables(filters, newpath, oldpath, dataPath, patch, tract):
    logger.info("Loading new catalog")
    newCats = loadCatalogs(filters, newpath, "template")
    logger.info("Loading new flux conserved catalog")
    newCats2 = loadCatalogs(filters, newpath)
    logger.info("Loading old catalog")
    oldCats = loadCatalogs(filters, oldpath)
    logger.info("Building astropy tables")
    newFlux, newSed = getAllFlux(newCats, filters)
    newFlux2, newSed2 = getAllFlux(newCats2, filters)
    newTable = buildAllTables(newCats, newFlux, newSed, filters)
    newTable2 = buildAllTables(newCats2, newFlux2, newSed2, filters)
    newTable = newTable[(newTable["parent"] != 0) & ~np.isnan(newTable["x"])]
    newTable2 = newTable2[(newTable2["parent"] != 0)
                          & ~np.isnan(newTable2["x"])]
    oldTables = OrderedDict([(f, buildTable(oldCats[f])) for f in filters])
    logger.info("matching results")
    oldTable, matches = matchAllCatalogs(newTable, oldTables, filters)
    matchedNew = newTable[matches]
    matchedNew2 = newTable2[matches]

    logger.info("loading calexps")
    butler = Butler(inputs=dataPath)
    calexp = OrderedDict()
    for f in filters:
        calexp[f] = butler.get('deepCoadd_calexp',
                               patch=patch,
                               filter="HSC-" + f,
                               tract=tract)
    return oldTable, matchedNew, matchedNew2, calexp, newCats
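A hypothetical invocation of the loader above; the filter list, paths, patch, and tract are all placeholder values, not ones from the original source:

oldTable, matchedNew, matchedNew2, calexp, newCats = loadAllTables(
    filters=["G", "R", "I"],          # HSC band suffixes, as in "HSC-G"
    newpath="/path/to/new/rerun",     # placeholder
    oldpath="/path/to/old/rerun",     # placeholder
    dataPath="/path/to/butler/repo",  # placeholder
    patch="5,5", tract=9813)          # placeholder data ID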
Example No. 3
    def makeCompatibleRepo(self, repoDir, calibRepoDir):
        """Set up a directory as a repository compatible with this dataset.

        If the directory already exists, any files required by the dataset will
        be added if absent; otherwise the directory will remain unchanged.

        Parameters
        ----------
        repoDir : `str`
            The directory where the output repository will be created.
        calibRepoDir : `str`
            The directory where the output calibration repository will be created.
        """
        mapperArgs = {'mapperArgs': {'calibRoot': calibRepoDir}}
        if _isRepo(self.templateLocation):
            # Stub repo is not a parent because we can't mix v1 and v2
            # repositories in the parents list
            Butler(inputs=[{
                "root": self.templateLocation,
                "mode": "r"
            }],
                   outputs=[{
                       "root": repoDir,
                       "mode": "rw",
                       **mapperArgs
                   }])
        else:
            Butler(inputs=[{
                "root": self._stubInputRepo,
                "mode": "r"
            }],
                   outputs=[{
                       "root": repoDir,
                       "mode": "rw",
                       **mapperArgs
                   }])
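A hedged usage sketch, assuming `dataset` is an instance of the class defining makeCompatibleRepo; both directory paths are placeholders:

dataset.makeCompatibleRepo("workspace/repo", "workspace/calib_repo")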
Example No. 4
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    ----------
    repo: str
        File location of Butler repository+rerun to load.
    tract: int
        Tract of sky region to load.
    patches: list of str
        List of patches.  If not specified, all patches in the tract's
        skymap are used.

    Returns
    -------
    AstroPy Table of merged catalog
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    merged_patch_cats = []
    for patch in patches:
        this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # Even if this_patch_merged_cat is an empty Table, it's still fine to
        # append it to the list here; empty tables disappear in the vstack below.
        merged_patch_cats.append(this_patch_merged_cat)

    merged_tract_cat = vstack(merged_patch_cats)
    return merged_tract_cat
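A hypothetical call to load_tract; the repo path and tract number are placeholders:

cat = load_tract("/datasets/hsc/repo/rerun/forcedPhot", 4849)
print(len(cat), cat.colnames[:5])  # quick sanity check on the merged table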
Example No. 5
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    ----------
    repo: str
        File location of Butler repository+rerun to load.
    tract: int
        Tract of sky region to load.
    patches: list of str
        List of patches.  If not specified, all patches in the tract's
        skymap are used.

    Returns
    -------
    Pandas DataFrame of merged catalog
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    merged_tract_cat = pd.DataFrame()
    for patch in patches:
        this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        # DataFrame.append returns a new object; reassign to keep the rows
        merged_tract_cat = merged_tract_cat.append(this_patch_merged_cat)

    return merged_tract_cat
Example No. 6
class MatplotlibStorageTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        inputDir = os.path.join(ROOT, "data", "input")
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'),
                                        prefix=type(self).__name__ + '-')
        self.butler = Butler(inputs=inputDir,
                             outputs={
                                 "root": self.testDir,
                                 "mode": 'rw'
                             })

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testWriteFigure(self):
        """Test writing a matpotlib figure to a repository."""
        import matplotlib
        matplotlib.use("Agg")
        from matplotlib import pyplot
        fig = pyplot.figure()
        pyplot.plot([0, 1], [0, 1], "k")
        self.butler.put(fig, "test_plot", visit=1, filter="g")
        self.assertTrue(
            self.butler.datasetExists("test_plot", visit=1, filter="g"))
        self.assertTrue(
            os.path.exists(self.butler.getUri("test_plot", visit=1,
                                              filter="g")))
Example No. 7
 def testCameraFromButler(self):
     """Test that the butler can return the camera
     """
     butler = Butler(self.input)
     camera = butler.get("camera", immediate=True)
     self.assertEqual(camera.getName(), self.mapper.camera.getName())
     self.assertEqual(len(camera), len(self.mapper.camera))
     self.assertEqual(len(camera[0]), len(self.mapper.camera[0]))
Example No. 8
 def setUp(self):
     inputDir = os.path.join(ROOT, "data", "input")
     self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'),
                                     prefix=type(self).__name__ + '-')
     self.butler = Butler(inputs=inputDir,
                          outputs={
                              "root": self.testDir,
                              "mode": 'rw'
                          })
Example No. 9
def makeLinearizerDecam(fromFile, force=False, verbose=False):
    """Convert the specified DECam linearity FITS table to standard LSST format

    Details:
    - Input format is one table per CCD, HDU is amplifier number,
        the table has 3 columns: ADU, ADU_LINEAR_A, ADU_LINEAR_B.
        The values of ADU should be contiguous (0, 1, 2...); check and error out if not.
        The values of the latter two are replacements (0+delta0, 1+delta1, 2+delta2...)
        and this is converted to offsets for the LSST linearization tables (delta0, delta1, delta2...)
    - Output is a set of LinearizeLookupTable instances, one per CCD, saved as dataset type "linearizer"
    - The row indices for the linearization lookup table are (row index=amp name): 0=A, 1=B

    @param[in] fromFile  path to DECam linearity table (a FITS file with one HDU per amplifier)
    """
    print("Making DECam linearizers from %r" % (fromFile, ))
    butler = Butler(mapper=DecamMapper)
    linearizerDir = DecamMapper.getLinearizerDir()
    if os.path.exists(linearizerDir):
        if not force:
            print("Output directory %r exists; use --force to replace" %
                  (linearizerDir, ))
            sys.exit(1)
        print("Replacing data in linearizer directory %r" % (linearizerDir, ))
    else:
        print("Creating linearizer directory %r" % (linearizerDir, ))
        os.makedirs(linearizerDir)

    camera = DecamMapper().camera
    fromHDUs = fits.open(fromFile)[1:]  # HDU 0 has no data
    assert len(fromHDUs) == len(camera)
    for ccdind, (detector, hdu) in enumerate(zip(camera, fromHDUs)):
        ccdnum = ccdind + 1
        if verbose:
            print("ccdnum=%s; detector=%s" % (ccdnum, detector.getName()))
        fromData = hdu.data
        assert len(fromData.dtype) == 3
        lsstTable = np.zeros((2, len(fromData)), dtype=np.float32)
        uncorr = fromData["ADU"]
        if not np.allclose(uncorr, np.arange(len(fromData))):
            raise RuntimeError(
                "ADU data not a range of integers starting at 0")
        for i, ampName in enumerate("AB"):
            # convert DECam replacement table to LSST offset table
            if verbose:
                print("DECam table for %s=%s..." % (
                    ampName,
                    fromData["ADU_LINEAR_" + ampName][0:5],
                ))
            lsstTable[i, :] = fromData["ADU_LINEAR_" + ampName] - uncorr
            if verbose:
                print("LSST  table for %s=%s..." % (
                    ampName,
                    lsstTable[i, 0:5],
                ))
        linearizer = LinearizeLookupTable(table=lsstTable, detector=detector)
        butler.put(linearizer, "linearizer", dataId=dict(ccdnum=ccdnum))
    print("Wrote %s linearizers" % (ccdind + 1, ))
Example No. 10
    def ingest(cls, root, camera, visit, filenames, sensors, metadata):
        """Add all images from an external visit (a full-focal-plane
        exposure) to a data repository.

        This both symlinks the external data files to the appropriate
        location in the directory structure and adds the necessary
        rows to the SQLite registry tables.

        Parameters
        ----------
        root : str
            Directory of the data repository to add data to.  Must have
            an existing "registry.sqlite3" file present directly in the
            root and a _mapper file pointing to HscAndExtMapper.
        camera : str
            Name of the camera used to produce the external observation.
            Must have an entry in ExternalImage.CAMERA_INFO.
        visit : int
            Original integer visit ID for the observation, *before* adding
            CAMERA_INFO[camera]["id"]*CAMERA_ID_MULTIPLIER.
        filenames : list
            A list of file names containing the external data files, either
            relative to the current directory or absolute.
        sensors : list
            A list of integer sensor IDs corresponding to the filenames list.
        metadata : VisitMetadata
            An object containing additional metadata for this visit to be
            added to the registry.  See VisitMetadata for a description of
            what attributes are required.
        """
        db = sqlite3.connect(os.path.join(root, "registry.sqlite3"))
        butler = Butler(inputs=[root])
        visit += cls.CAMERA_INFO[camera]["id"] * cls.CAMERA_ID_MULTIPLIER
        ccdCols = [
            "filter", "dateObs", "taiObs", "field", "expId", "pointing",
            "dataType", "pa"
        ]
        ccdSql = "INSERT INTO raw (visit, ccd, {}) VALUES (?, ?, {})".format(
            ", ".join(ccdCols), ", ".join(["?"] * len(ccdCols)))
        ccdValues = tuple(getattr(metadata, col) for col in ccdCols)
        visitCols = ["filter", "dateObs", "taiObs", "field"]
        visitSql = "INSERT INTO raw_visit (visit, {}) VALUES (?, {})".format(
            ", ".join(visitCols), ", ".join(["?"] * len(visitCols)))
        visitValues = tuple(getattr(metadata, col) for col in visitCols)
        for filename, sensor in zip(filenames, sensors):
            outputFileName = butler.get("external_filename",
                                        visit=visit,
                                        ccd=sensor)[0]
            os.symlink(filename, outputFileName)
            db.execute(ccdSql, (
                visit,
                sensor,
            ) + ccdValues)
        db.execute(visitSql, (visit, ) + visitValues)
        db.commit()
        return visit
Example No. 11
 def testDetectors(self):
     """Test that the detector returned by the gen 2 butler is the same
     as the expected one.
     """
     for root, did, expected in zip(self.roots, self.ids, self.expecteds):
         butler = Butler(root)
         raw = butler.get("raw", dataId=did)
         for amp1, amp2 in zip(expected['detector'], raw.getDetector()):
             with self.subTest(amp=amp1.getName()):
                 self.assertEqual(amp1.getName(), amp2.getName())
                 self.assertAmpRawBBoxesEqual(amp1, amp2)
Example No. 12
 def testAssemble(self):
     """Test the assembly of E2V and ITL sensors
     """
     task = AssembleCcdTask()
     # exclude LATISS for this test since we don't have an expected output
     for root, did, expected in zip(self.roots, self.ids, self.expecteds):
         butler = Butler(root)
         raw = butler.get("raw", dataId=did)
         assembled = task.assembleCcd(raw)
         count = numpy.sum(expected['expected'].read().array -
                           assembled.getImage().array)
         self.assertEqual(count, 0)
Example No. 13
class SimpleButlerInterface(BaseGenericCatalog):
    """
    A simple butler interface.

    Parameters
    ----------
    repo : str
        Path to repository containing the DM-processed data.
    datasetType : str
        Dataset type to read via the butler.
    dataId : dict, optional
        Data ID used to restrict the subset of datasets loaded.
    """
    def _subclass_init(self, repo, datasetType, dataId=None, **kwargs):

        if not _HAS_LSST_STACK:
            raise RuntimeError('LSST Stack not available')

        self._butler = Butler(repo)
        self._datasetType = datasetType
        self._dataId_cache = self._butler.subset(self._datasetType,
                                                 dataId=dataId).cache

        self._columns = None
        for dataId in self._dataId_cache:
            data = self._get_data(dataId)
            if data is not None:
                self._columns = data.schema.getNames()
                break

        if not self._columns:
            raise RuntimeError('No datasets or columns found!')

    def _get_data(self, dataId, datasetType=None):
        try:
            data = self._butler.get(datasetType or self._datasetType,
                                    dataId=dataId)
        except NoResults:
            return None
        return data

    def _generate_quantity_getter(self, dataId):
        data = self._get_data(dataId)
        if data is None:
            return
        return data.get

    def _iter_native_dataset(self, native_filters=None):
        for dataId in self._dataId_cache:
            if native_filters is None or native_filters.check_scalar(dataId):
                quantity_getter = self._generate_quantity_getter(dataId)
                if quantity_getter is not None:
                    yield quantity_getter

    def _generate_native_quantity_list(self):
        return self._columns
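A hedged construction sketch. GCR catalogs are normally instantiated through the GCRCatalogs registry rather than directly, and the repo path and dataset type here are placeholders:

cat = SimpleButlerInterface(repo="/path/to/repo",
                            datasetType="deepCoadd_forced_src")
print(sorted(cat._generate_native_quantity_list())[:5])  # available columns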
Example No. 14
def load_and_save_tract(repo,
                        tract,
                        filename,
                        key_prefix='coadd',
                        patches=None,
                        overwrite=True,
                        verbose=False,
                        **kwargs):
    """Save catalogs to HDF5 from forced-photometry coadds across available filters.

    Iterates through patches, saving each in append mode to the save HDF5 file.

    Parameters
    ----------
    repo: str
        File location of Butler repository+rerun to load.
    tract: int
        Tract of sky region to load.
    filename: str
        Filename for HDF file.
    key_prefix: str
        Base for the key in the HDF file.
        Keys will be of the form "%s_%d_%s" % (key_prefix, tract, patch),
        with the comma removed from the patch name to provide a valid
        Python identifier: e.g., 'coadd_4849_11'.
    patches: list of str
        List of patches.  If not specified, all patches in the tract's
        skymap are used.
    overwrite: bool
        Overwrite an existing HDF file.
    verbose: bool
        Print progress messages.
    """
    butler = Butler(repo)

    if patches is None:
        # Extract the patches for this tract from the skymap
        skymap = butler.get(datasetType='deepCoadd_skyMap')
        patches = ['%d,%d' % patch.getIndex() for patch in skymap[tract]]

    for patch in patches:
        if verbose:
            print("Processing tract %d, patch %s" % (tract, patch))
        patch_merged_cat = load_patch(butler,
                                      tract,
                                      patch,
                                      verbose=verbose,
                                      **kwargs)
        if len(patch_merged_cat) == 0:
            if verbose:
                print("  No good entries for tract %d, patch %s" %
                      (tract, patch))
            continue

        key = '%s_%d_%s' % (key_prefix, tract, patch)
        key = valid_identifier_name(key)
        patch_merged_cat.to_pandas().to_hdf(filename, key, format='fixed')
Example No. 15
def makeLinearizerDecam(fromFile, force=False, verbose=False):
    """Convert the specified DECam linearity FITS table to standard LSST format

    Details:
    - Input format is one table per CCD, HDU is amplifier number,
        the table has 3 columns: ADU, ADU_LINEAR_A, ADU_LINEAR_B.
        The values of ADU should be contiguous (0, 1, 2...); check and error out if not.
        The values of the latter two are replacements (0+delta0, 1+delta1, 2+delta2...)
        and this is converted to offsets for the LSST linearization tables (delta0, delta1, delta2...)
    - Output is a set of LinearizeLookupTable instances, one per CCD, saved as dataset type "linearizer"
    - The row indices for the linearization lookup table are (row index=amp name): 0=A, 1=B

    @param[in] fromFile  path to DECam linearity table (a FITS file with one HDU per amplifier)
    """
    print("Making DECam linearizers from %r" % (fromFile,))
    butler = Butler(mapper=DecamMapper)
    linearizerDir = DecamMapper.getLinearizerDir()
    if os.path.exists(linearizerDir):
        if not force:
            print("Output directory %r exists; use --force to replace" % (linearizerDir,))
            sys.exit(1)
        print("Replacing data in linearizer directory %r" % (linearizerDir,))
    else:
        print("Creating linearizer directory %r" % (linearizerDir,))
        os.makedirs(linearizerDir)

    camera = DecamMapper().camera
    fromHDUs = fits.open(fromFile)[1:]  # HDU 0 has no data
    assert len(fromHDUs) == len(camera)
    for ccdind, (detector, hdu) in enumerate(zip(camera, fromHDUs)):
        ccdnum = ccdind + 1
        if verbose:
            print("ccdnum=%s; detector=%s" % (ccdnum, detector.getName()))
        fromData = hdu.data
        assert len(fromData.dtype) == 3
        lsstTable = np.zeros((2, len(fromData)), dtype=np.float32)
        uncorr = fromData["ADU"]
        if not np.allclose(uncorr, np.arange(len(fromData))):
            raise RuntimeError("ADU data not a range of integers starting at 0")
        for i, ampName in enumerate("AB"):
            # convert DECam replacement table to LSST offset table
            if verbose:
                print("DECam table for %s=%s..." % (ampName, fromData["ADU_LINEAR_" + ampName][0:5],))
        lsstTable[i, :] = fromData["ADU_LINEAR_" + ampName] - uncorr
            if verbose:
                print("LSST  table for %s=%s..." % (ampName, lsstTable[i,0:5],))
        linearizer = LinearizeLookupTable(table=lsstTable, detector=detector)
        butler.put(linearizer, "linearizer", dataId=dict(ccdnum=ccdnum))
    print("Wrote %s linearizers" % (ccdind+1,))
Example No. 16
    def connect(self):
        # search for metadata.yaml file
        # 1. Look in path directory i.e. '/project/tmorton/tickets/DM-20015/RC2_w18/metadata.yaml'
        # 2. Look for datafolder in current directory i.e. './RC2_w18/metadata.yaml'
        # 3. Look for datafolder in dir specified in LSST_META env variable i.e. /user/name/lsst_meta/RC2_w18/metadata.yaml'
        #    when LSST_META='/user/name/lsst_meta'
      
        print('-- read metadata file --')

        # If Butler is available, use it to connect; otherwise read from disk
        if Butler:
            try:
                print('-- connect to butler --')
                self.conn = Butler(str(self.path))
                self.metadata = self.conn.get('qaDashboard_metadata')
                self.failures = self.metadata.get('failures', {})
                if not self.filters:
                    self.filters = list(self.metadata['visits'].keys())
                if not self.tracts:
                    all_tracts = [list(self.metadata['visits'][filt].keys()) for filt in self.filters]
                    self.tracts = list(set([int(y) for x in all_tracts for y in x]))
            except Exception:
                print(f'{self.path} is not available via Butler; attempting to read parquet files instead')
        else:
            if self.path.joinpath(METADATA_FILENAME).exists():
                self.metadata_path = self.path.joinpath(METADATA_FILENAME)
            else:
                self.metadata_path = Path(os.environ.get('LSST_META', os.curdir)).joinpath(self.path.name, METADATA_FILENAME)

            with self.metadata_path.open('r') as f:
                self.metadata = yaml.load(f, Loader=yaml.SafeLoader)
                self.failures = self.metadata.get('failures', {})
                if self.tracts is None:
                    self.tracts = list(set(x for v in self.metadata['visits'].values() for x in v.keys())) 


        print('-- read coadd table --')
        self.fetch_coadd_table()  # currently ignoring forced/unforced
        # update metadata based on coadd table fields
        print('-- generate other metadata fields --')
        df = self.coadd['qaDashboardCoaddTable']
        self.flags = df.columns[df.dtypes == bool].to_list()
        if not Butler:
            self.filters = list(self.metadata['visits'].keys()) 
        self.metrics = set(df.columns.to_list()) - set(self.flags) - set(['patch', 'dec', 'label', 'psfMag', 
                                                                         'ra', 'filter', 'dataset', 'dir0', 'tract'])
        print('-- read visit data --')
        self.fetch_visits_by_metric()
        print('-- done with reads --')
Example No. 17
 def testExposureIdInfo(self):
     butler = Butler(self.input)
     expIdBits = self.mapper.bypass_ccdExposureId_bits(  # args are ignored
         datasetType=None,
         pythonType=int,
         location=None,
         dataId=dict(),
     )
     for visit in (1, 2, 3):
         dataId = dict(visit=visit)
         expIdInfo = butler.get("expIdInfo", dataId=dataId)
         self.assertEqual(expIdInfo.expId, visit)
         self.assertEqual(expIdInfo.expBits, expIdBits)
         self.assertEqual(expIdInfo.maxBits, 64)
         self.assertEqual(expIdInfo.unusedBits, expIdInfo.maxBits-expIdBits)
Example No. 18
    def testBasics(self):
        """Test detection and measurement on simple synthesized data
        """
        bbox = Box2I(Point2I(256, 100), Extent2I(128, 127))
        minCounts = 5000
        maxCounts = 50000
        starSigma = 1.5
        numX = 5
        numY = 5
        coordList = self.makeCoordList(
            bbox=bbox,
            numX=numX,
            numY=numY,
            minCounts=minCounts,
            maxCounts=maxCounts,
            sigma=starSigma,
        )
        kwid = 11  # kernel width
        sky = 2000
        # create an exposure without a Wcs; add the Wcs later
        exposure = plantSources(bbox=bbox,
                                kwid=kwid,
                                sky=sky,
                                coordList=coordList,
                                addPoissonNoise=True)

        schema = SourceTable.makeMinimalSchema()

        config = DetectAndMeasureTask.ConfigClass()
        task = DetectAndMeasureTask(config=config, schema=schema)

        butler = Butler(root=InputDir)
        dataRef = butler.dataRef("calexp", dataId=dict(visit=1))
        wcs = dataRef.get("raw").getWcs()
        exposure.setWcs(wcs)
        exposureIdInfo = dataRef.get("expIdInfo")
        taskRes = task.run(exposure=exposure, exposureIdInfo=exposureIdInfo)
        self.assertEqual(len(taskRes.sourceCat), numX * numY)
        schema = taskRes.sourceCat.schema
        centroidFlagKey = schema.find("slot_Centroid_flag").getKey()
        parentKey = schema.find("parent").getKey()
        psfFluxFlagKey = schema.find("slot_PsfFlux_flag").getKey()
        psfFluxKey = schema.find("slot_PsfFlux_flux").getKey()
        for src in taskRes.sourceCat:
            self.assertFalse(src.get(centroidFlagKey))  # centroid found
            self.assertEqual(src.get(parentKey), 0)  # not deblended
            self.assertFalse(src.get(psfFluxFlagKey))  # flux measured
            self.assertGreater(src.get(psfFluxKey), 4000)  # flux sane
Example No. 19
    def setUp(self):

        # Load sample input from disk
        testDir = os.path.dirname(__file__)
        self.srcCat = afwTable.SourceCatalog.readFits(
            os.path.join(testDir, "data", "v695833-e0-c000.xy.fits"))

        self.srcCat["slot_ApFlux_fluxSigma"] = 1
        self.srcCat["slot_PsfFlux_fluxSigma"] = 1

        # The .xy.fits file has sources in the range ~ [0,2000],[0,4500]
        # which is bigger than the exposure
        self.bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0),
                                  afwGeom.Extent2I(2048, 4612))
        smallExposure = afwImage.ExposureF(
            os.path.join(testDir, "data", "v695833-e0-c000-a00.sci.fits"))
        self.exposure = afwImage.ExposureF(self.bbox)
        self.exposure.setWcs(smallExposure.getWcs())
        self.exposure.setFilter(smallExposure.getFilter())
        self.exposure.setCalib(smallExposure.getCalib())

        # Make a reference loader
        butler = Butler(RefCatDir)
        self.refObjLoader = LoadIndexedReferenceObjectsTask(butler=butler)
        logLevel = Log.TRACE
        self.log = Log.getLogger('testPhotoCal')
        self.log.setLevel(logLevel)

        self.config = PhotoCalConfig()

        # The test and associated data have been prepared on the basis that we
        # use the PsfFlux to perform photometry.
        self.config.fluxField = "base_PsfFlux_flux"
Example No. 20
def load_tract(repo, tract, patches=None, **kwargs):
    """Merge catalogs from forced-photometry coadds across available filters.

    Parameters
    ----------
    repo: str
        File location of Butler repository+rerun to load.
    tract: int
        Tract of sky region to load.
    patches: list of str
        List of patches.  If not specified, will default to '0,0'--'7,7'.

    Returns
    -------
    AstroPy Table of merged catalog
    """
    butler = Butler(repo)
    if patches is None:
        patches = ['%d,%d' % (i, j) for i in range(8) for j in range(8)]

    merged_patch_cats = []
    for patch in patches:
        try:
            this_patch_merged_cat = load_patch(butler, tract, patch, **kwargs)
        except NoResults as e:
            print(e)
            continue
        merged_patch_cats.append(this_patch_merged_cat)

    merged_tract_cat = vstack(merged_patch_cats)
    return merged_tract_cat
Example No. 21
def get_butler_by_repo(repo, **kwargs):
    """Construct and return a Bulter for the requested repository

    Parameters
    ----------
    repo : `str`
        Name of the repo, e.g., 'TS8' | 'BOT'
    kwargs
        Passed to the Butler constructor

    Returns
    -------
    butler : `Butler`
        the requested Butler

    Raises
    ------
    KeyError : If repo does not match any known repository
    """
    try:
        repo_path = BUTLER_REPO_DICT[repo]
    except KeyError:
        raise KeyError("Unknown Bulter repo key %s" % repo)
    butler = Butler(repo_path, **kwargs)
    return butler
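A hypothetical usage, assuming 'BOT' is one of the keys defined in BUTLER_REPO_DICT:

butler = get_butler_by_repo('BOT')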
Example No. 22
    def setUp(self):
        refCatDir = os.path.join(os.path.dirname(__file__), "data",
                                 "sdssrefcat")

        self.bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0),
                                  afwGeom.Extent2I(3001, 3001))
        self.ctrPix = afwGeom.Point2I(1500, 1500)
        metadata = dafBase.PropertySet()
        metadata.set("RADECSYS", "FK5")
        metadata.set("EQUINOX", 2000.0)
        metadata.set("CTYPE1", "RA---TAN")
        metadata.set("CTYPE2", "DEC--TAN")
        metadata.set("CUNIT1", "deg")
        metadata.set("CUNIT2", "deg")
        metadata.set("CRVAL1", 215.5)
        metadata.set("CRVAL2", 53.0)
        metadata.set("CRPIX1", self.ctrPix[0] + 1)
        metadata.set("CRPIX2", self.ctrPix[1] + 1)
        metadata.set("CD1_1", 5.1e-05)
        metadata.set("CD1_2", 0.0)
        metadata.set("CD2_2", -5.1e-05)
        metadata.set("CD2_1", 0.0)
        self.tanWcs = afwImage.makeWcs(metadata)
        self.exposure = afwImage.ExposureF(self.bbox)
        self.exposure.setWcs(self.tanWcs)
        self.exposure.setFilter(afwImage.Filter("r", True))
        butler = Butler(refCatDir)
        self.refObjLoader = LoadIndexedReferenceObjectsTask(butler=butler)
Example No. 23
    def __init__(
        self, butlerpath, destination=None, dataset=None, engine="pyarrow", sample_frac=None, num_buckets=8,
    ):

        self._butler = Butler(butlerpath)
        if dataset is None:
            dataset = self._default_dataset

        self.dataset = dataset
        if destination is None:
            destination = f"{butlerpath}/ktk"
        self.destination = destination
        self.sample_frac = sample_frac
        self.num_buckets = num_buckets

        self.stats_path = f"{self.destination}/{self.dataset}_stats.parq"

        self._store = None
        self.engine = engine
        self.metadata = self.butler.get("qaDashboard_info")

        self.dataIds = [
            dataId for dataId in self.iter_dataId() if self.butler.datasetExists(self.dataset, dataId)
        ]

        self.filters = [filt for filt in self.metadata["visits"].keys()]
        self.dataIds_by_filter = {
            filt: [d for d in self.dataIds if d["filter"] == filt] for filt in self.filters
        }

        self._filenames = None
        self._filenames_by_filter = None
Example No. 24
def __main__(args):
    repo_directory = "/lsstdata/offline/teststand/BOT/gen2repo"
    butler = Butler(repo_directory)
    runs = ['12672', '12673', '12844', '12845', '12853', '12855']
    run = runs[0]
    sensors = set(
        butler.queryMetadata('raw', ['raftName', 'detectorName'],
                             dataId={
                                 'run': run,
                                 'imageType': 'BIAS'
                             }))

    run_dark_regression_analysis_on_all_sensors(butler,
                                                runs,
                                                sensors,
                                                show_plots=False)
Example No. 25
    def setUp(self):
        # Load sample input from disk
        testDir = os.path.dirname(__file__)

        self.srcSet = SourceCatalog.readFits(os.path.join(testDir, "v695833-e0-c000.xy.fits"))

        self.bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(2048, 4612))  # approximate
        # create an exposure with the right metadata; the closest thing we have is
        # apparently v695833-e0-c000-a00.sci.fits, which is much too small
        smallExposure = ExposureF(os.path.join(testDir, "v695833-e0-c000-a00.sci.fits"))
        self.exposure = ExposureF(self.bbox)
        self.exposure.setWcs(smallExposure.getWcs())
        self.exposure.setFilter(smallExposure.getFilter())
        # copy the pixels we can, in case the user wants a debug display
        mi = self.exposure.getMaskedImage()
        mi.assign(smallExposure.getMaskedImage(), smallExposure.getBBox())

        logLevel = Log.INFO
        refCatDir = os.path.join(testDir, "data", "sdssrefcat")
        butler = Butler(refCatDir)
        refObjLoader = LoadIndexedReferenceObjectsTask(butler=butler)
        astrometryConfig = AstrometryTask.ConfigClass()
        self.astrom = AstrometryTask(config=astrometryConfig, refObjLoader=refObjLoader)
        self.astrom.log.setLevel(logLevel)
        # Since our sourceSelector is a registry object we have to wait for it to be created
        # before setting default values.
        self.astrom.matcher.sourceSelector.config.minSnr = 0
Example No. 26
class FitsStorageTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        inputDir = os.path.join(ROOT, "data", "input")
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'), prefix=type(self).__name__+'-')
        self.butler = Butler(inputs=inputDir, outputs={"root": self.testDir, "mode": 'rw'})

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testReadPropertyList(self):
        """Test that reading a FitsStorage dataset into a PropertyList
        is the same as using the special _md Exposure hook.
        """
        md1 = self.butler.get("raw_md", visit=1, filter="g")
        md2 = self.butler.get("rawMetadataDirect", visit=1, filter="g")
        self.assertEqual(md1, md2)
Example No. 27
def get_diff_calexp_coadd_butler(diff_repo, calexp_repo, coadd_repo):
    """
    Returns:
    butler_dict = {
        'diff': diff_butler,
        'calexp': calexp_butler,
        'coadd': coadd_butler
    }
    """
    diff_butler = Butler(diff_repo)
    calexp_butler = Butler(calexp_repo)
    coadd_butler = Butler(coadd_repo)
    butler_dict = {
        'diff': diff_butler,
        'calexp': calexp_butler,
        'coadd': coadd_butler
    }
    return butler_dict
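A hypothetical usage with placeholder repository paths:

butlers = get_diff_calexp_coadd_butler('/repos/diff', '/repos/calexp',
                                       '/repos/coadd')
diff_butler = butlers['diff']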
Example No. 28
 def setUp(self):
     np.random.seed(12345)
     self.butler = Butler(RefCatDir)
     refObjLoader = LoadIndexedReferenceObjectsTask(butler=self.butler)
     center = lsst.afw.geom.SpherePoint(215.5, 53.0, lsst.afw.geom.degrees)
     radius = 0.5 * lsst.afw.geom.degrees
     self.filter = "r"
     self.references = refObjLoader.loadSkyCircle(center, radius,
                                                  self.filter).refCat
Example No. 29
def main(visit, detector, diarepo, outputdir='.'):
    diabutler = Butler(diarepo)

    diff_img_path = os.path.join(outputdir,
                                 f'diff_exposure_v{visit}_d{detector}.fits')
    science_img_path = os.path.join(
        outputdir, f'science_exposure_v{visit}_d{detector}.fits')

    imgD = diabutler.get('deepDiff_differenceExp',
                         visit=visit,
                         detector=detector)
    imgD.writeFits(diff_img_path)

    imgS = diabutler.get('calexp', visit=visit, detector=detector)
    imgS.writeFits(science_img_path)

    print(f'files copied to {outputdir}')
    return
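A hypothetical invocation; the visit, detector, and repo path are placeholders:

main(visit=412060, detector=42, diarepo='/repos/dia_rerun', outputdir='/tmp')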
Example No. 30
    def testBasics(self):
        """Test detection and measurement on simple synthesized data
        """
        bbox = Box2I(Point2I(256, 100), Extent2I(128, 127))
        minCounts = 5000
        maxCounts = 50000
        starSigma = 1.5
        numX = 5
        numY = 5
        coordList = self.makeCoordList(
            bbox=bbox,
            numX=numX,
            numY=numY,
            minCounts=minCounts,
            maxCounts=maxCounts,
            sigma=starSigma,
        )
        kwid = 11  # kernel width
        sky = 2000
        # create an exposure without a Wcs; add the Wcs later
        exposure = plantSources(bbox=bbox, kwid=kwid, sky=sky, coordList=coordList, addPoissonNoise=True)

        schema = SourceTable.makeMinimalSchema()

        config = DetectAndMeasureTask.ConfigClass()
        task = DetectAndMeasureTask(config=config, schema=schema)

        butler = Butler(root=InputDir)
        dataRef = butler.dataRef("calexp", dataId=dict(visit=1))
        wcs = dataRef.get("raw").getWcs()
        exposure.setWcs(wcs)
        exposureIdInfo = dataRef.get("expIdInfo")
        taskRes = task.run(exposure=exposure, exposureIdInfo=exposureIdInfo)
        self.assertEqual(len(taskRes.sourceCat), numX * numY)
        schema = taskRes.sourceCat.schema
        centroidFlagKey = schema.find("slot_Centroid_flag").getKey()
        parentKey = schema.find("parent").getKey()
        psfFluxFlagKey = schema.find("slot_PsfFlux_flag").getKey()
        psfFluxKey = schema.find("slot_PsfFlux_flux").getKey()
        for src in taskRes.sourceCat:
            self.assertFalse(src.get(centroidFlagKey))  # centroid found
            self.assertEqual(src.get(parentKey), 0)     # not deblended
            self.assertFalse(src.get(psfFluxFlagKey))   # flux measured
            self.assertGreater(src.get(psfFluxKey), 4000)  # flux sane
Example No. 31
    def testBasics(self):
        """Test construction of a discrete sky map
        """
        butler = Butler(inputs=self.inPath,
                        outputs={
                            'root': self.outPath,
                            'mode': 'rw'
                        })
        coordList = []  # list of sky coords of all corners of all calexp
        for dataId in (
                dict(visit=1, filter="g"),
                dict(visit=2, filter="g"),
                dict(visit=3, filter="r"),
        ):
            # TODO: pybind11 remove `immediate=True` once DM-9112 is resolved
            rawImage = butler.get("raw", dataId, immediate=True)
            # fake calexp by simply copying raw data; the task just cares about its bounding box
            # (which is slightly larger for raw, but that doesn't matter for this test)
            calexp = rawImage
            butler.put(calexp, "calexp", dataId)
            calexpWcs = calexp.getWcs()
            calexpBoxD = Box2D(calexp.getBBox())
            coordList += [
                calexpWcs.pixelToSky(corner)
                for corner in calexpBoxD.getCorners()
            ]

        # use the calexp to make a sky map
        retVal = MakeDiscreteSkyMapTask.parseAndRun(
            args=[self.inPath, "--output", self.outPath, "--id", "filter=g^r"],
            config=self.config,
            doReturnResults=True,
        )
        self.assertEqual(len(retVal.resultList), 1)
        skyMap = retVal.resultList[0].result.skyMap
        self.assertEqual(type(skyMap), DiscreteSkyMap)
        self.assertEqual(len(skyMap), 1)
        tractInfo = skyMap[0]
        self.assertEqual(tractInfo.getId(), 0)
        self.assertEqual(tractInfo.getNumPatches(), Extent2I(3, 3))
        tractWcs = tractInfo.getWcs()
        tractBoxD = Box2D(tractInfo.getBBox())
        for skyPoint in coordList:
            self.assertTrue(tractBoxD.contains(tractWcs.skyToPixel(skyPoint)))
Example No. 32
    def _subclass_init(self, repo, datasetType, dataId=None, **kwargs):

        if not _HAS_LSST_STACK:
            raise RuntimeError('LSST Stack not available')

        self._butler = Butler(repo)
        self._datasetType = datasetType
        self._dataId_cache = self._butler.subset(self._datasetType,
                                                 dataId=dataId).cache

        self._columns = None
        for dataId in self._dataId_cache:
            data = self._get_data(dataId)
            if data is not None:
                self._columns = data.schema.getNames()
                break

        if not self._columns:
            raise RuntimeError('No datasets or columns found!')
Example No. 33
 def extract_instrument_from_repo(repo):
     """Extract the last part of the mapper name from a Butler repo.
     'lsst.obs.lsstSim.lsstSimMapper.LsstSimMapper' -> 'LSSTSIM'
     'lsst.obs.cfht.megacamMapper.MegacamMapper' -> 'CFHT'
     'lsst.obs.decam.decamMapper.DecamMapper' -> 'DECAM'
     'lsst.obs.hsc.hscMapper.HscMapper' -> 'HSC'
     """
     mapper_class = Butler.getMapperClass(repo)
     instrument = mapper_class.getCameraName()
     return instrument.upper()
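A hypothetical usage; the repo path is a placeholder:

instrument = extract_instrument_from_repo('/datasets/decam/repo')
print(instrument)  # e.g. 'DECAM'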
Example No. 34
 def butler(self):
     if not self._butler:
         if self.gen3:
             GEN3_REPO_ROOT = os.path.join(getPackageDir("ci_hsc_gen2"),
                                           "DATAgen3")
             self._butler = lsst.daf.butler.Butler(
                 GEN3_REPO_ROOT, collections=self.collection)
         else:
             self._butler = Butler(self.root)
     return self._butler
Example No. 35
    def create(self):
        """Create a Butler.

        @returns a new Butler.
        """

        if hasattr(self.mapper, 'root'):
            root = self.mapper.root
        else:
            root = None
        return Butler(root=root, mapper=self.mapper)
Example No. 36
class MatplotlibStorageTestCase(lsst.utils.tests.TestCase):

    def setUp(self):
        inputDir = os.path.join(ROOT, "data", "input")
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'), prefix=type(self).__name__+'-')
        self.butler = Butler(inputs=inputDir, outputs={"root": self.testDir, "mode": 'rw'})

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testWriteFigure(self):
        """Test writing a matpotlib figure to a repository."""
        import matplotlib
        matplotlib.use("Agg")
        from matplotlib import pyplot
        fig = pyplot.figure()
        pyplot.plot([0, 1], [0, 1], "k")
        self.butler.put(fig, "test_plot", visit=1, filter="g")
        self.assertTrue(self.butler.datasetExists("test_plot", visit=1, filter="g"))
        self.assertTrue(os.path.exists(self.butler.getUri("test_plot", visit=1, filter="g")))
Example No. 37
    def testBasics(self):
        """Test construction of a discrete sky map
        """
        butler = Butler(inputs=self.inPath, outputs={'root': self.outPath, 'mode': 'rw'})
        coordList = []  # list of sky coords of all corners of all calexp
        for dataId in (
            dict(visit=1, filter="g"),
            dict(visit=2, filter="g"),
            dict(visit=3, filter="r"),
        ):
            rawImage = butler.get("raw", dataId)
            # fake calexp by simply copying raw data; the task just cares about its bounding box
            # (which is slightly larger for raw, but that doesn't matter for this test)
            calexp = rawImage
            butler.put(calexp, "calexp", dataId)
            calexpWcs = calexp.getWcs()
            calexpBoxD = Box2D(calexp.getBBox())
            coordList += [calexpWcs.pixelToSky(corner) for corner in calexpBoxD.getCorners()]

        # use the calexp to make a sky map
        retVal = MakeDiscreteSkyMapTask.parseAndRun(
            args=[self.inPath, "--output", self.outPath, "--id", "filter=g^r"],
            config=self.config,
            doReturnResults=True,
        )
        self.assertEqual(len(retVal.resultList), 1)
        skyMap = retVal.resultList[0].result.skyMap
        self.assertEqual(type(skyMap), DiscreteSkyMap)
        self.assertEqual(len(skyMap), 1)
        tractInfo = skyMap[0]
        self.assertEqual(tractInfo.getId(), 0)
        self.assertEqual(tractInfo.getNumPatches(), Extent2I(3, 3))
        tractWcs = tractInfo.getWcs()
        tractBoxD = Box2D(tractInfo.getBBox())
        for skyPoint in coordList:
            self.assertTrue(tractBoxD.contains(tractWcs.skyToPixel(skyPoint)))
Example No. 38
    def __init__(self, inputs, outputs=None):
        """Initialize the butler wrapper class.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)
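A hedged construction sketch showing the two input forms the docstring describes; the paths are placeholders:

# A plain string input (treated as a repository URI/path)
wrapper = ButlerWrapper(inputs="/path/to/input/repo")
# A dict output used as shorthand for a RepositoryArgs instance
wrapper = ButlerWrapper(inputs="/path/to/input/repo",
                        outputs={"root": "/path/to/output/repo", "mode": "rw"})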
Example No. 39
# Creates the data dictionary used in the ps example

from lsst.daf.persistence import Butler
import pickle

n_max = 0
stats = {}

# output dir of the stack processing

output_dir = ""
butler = Butler(output_dir)

for dataref in butler.subset(datasetType='src'):

    if dataref.datasetExists():  # processCcd did not fail

        visit = dataref.dataId['visit']
        src = dataref.get()
        n = len(src)
        print(visit, n)
        stats.setdefault(visit, []).append(n)

with open("../test/ps.pkl", "wb") as f:
    pickle.dump(stats, f)
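A hypothetical follow-up that reads the pickle back and summarizes the counts per visit:

with open("../test/ps.pkl", "rb") as f:
    stats = pickle.load(f)
for visit, counts in sorted(stats.items()):
    print(visit, len(counts), sum(counts))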

Example No. 40
class ButlerWrapper(object):

    def __init__(self, inputs, outputs=None):
        """Initialize the butler wrapper class.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)

    def setInputsAndOutputs(self, inputs, outputs=None):
        """Set the inputs and outputs of butler.

        Parameters
        ----------
        inputs : RepositoryArgs, dict, or str
            Can be a single item or a list. Provides arguments to load an
            existing repository (or repositories). String is assumed to be a
            URI and is used as the cfgRoot (URI to the location of the cfg
            file). (Local file system URI does not have to start with
            'file://' and in this way can be a relative path). The
            'RepositoryArgs' class can be used to provide more parameters with
            which to initialize a repository (such as 'mapper', 'mapperArgs',
            'tags', etc. See the 'RepositoryArgs' documentation for more
            details). A dict may be used as shorthand for a 'RepositoryArgs'
            class instance. The dict keys must match parameters to the
            'RepositoryArgs.__init__' function.
        outputs : RepositoryArgs, dict, or str, optional
            Provides arguments to load one or more existing repositories or
            create new ones. The different types are handled the same as for
            'inputs'. (the default is None.)
        """

        self._butler = Butler(inputs=inputs, outputs=outputs)

    def getRawExp(self, visit, raft, sensor, snap=None):
        """Get the raw exposure.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        snap : int, optional
            Snap index (0 or 1) selecting the first or second exposure. (the
            default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Raw exposure object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        if (snap is not None) and isinstance(snap, (int, float)):
            dataId["snap"] = int(snap)

        return self._butler.get("raw", dataId=dataId)

    def _getDefaultDataId(self, visit, raft, sensor):
        """Get the default data Id.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").

        Returns
        -------
        dict
            Default data Id.
        """

        dataId = dict(visit=int(visit), raftName=raft, detectorName=sensor)

        return dataId

    def getPostIsrCcd(self, visit, raft, sensor, afilter=None):
        """Get the post-ISR CCD exposure.

        ISR: Instrument signature removal.
        CCD: Charge-coupled device.

        Parameters
        ----------
        visit : int
            Visit Id.
        raft : str
            Abbreviated raft name (e.g. "R22").
        sensor : str
            Abbreviated sensor name (e.g. "S11").
        afilter : str, optional
            Active filter ("u", "g", "r", "i", "z", "y") (the default is None.)

        Returns
        -------
        lsst.afw.image.exposure.exposure.ExposureF
            Post-ISR CCD object.
        """

        dataId = self._getDefaultDataId(visit, raft, sensor)
        if (afilter is not None) and isinstance(afilter, str):
            dataId["filter"] = afilter

        return self._butler.get("postISRCCD", dataId=dataId)

    @staticmethod
    def getImageData(exposure):
        """Get the image data.

        Parameters
        ----------
        exposure : lsst.afw.image.exposure.exposure.ExposureF
            Exposure object.

        Returns
        -------
        numpy.ndarray
            Image data.
        """

        # Get the numpy array data based on the input object type
        if isinstance(exposure, np.ndarray):
            data = exposure
        elif hasattr(exposure, "getMaskedImage"):
            data = exposure.getMaskedImage().getImage().getArray()
        elif hasattr(exposure, "getImage"):
            data = exposure.getImage().getArray()
        else:
            data = exposure.getArray()

        # Return the data in numpy array
        return data
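A hedged end-to-end sketch using the wrapper above; the repo path, visit, raft, and sensor values are placeholders:

wrapper = ButlerWrapper(inputs="/path/to/repo")
raw = wrapper.getRawExp(9006001, "R22", "S11")
data = ButlerWrapper.getImageData(raw)
print(data.shape)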
Example No. 41
 def setUp(self):
     inputDir = os.path.join(ROOT, "data", "input")
     self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'), prefix=type(self).__name__+'-')
     self.butler = Butler(inputs=inputDir, outputs={"root": self.testDir, "mode": 'rw'})