def instanceSearch(self, path):
    """Search for the given path in this storage instance.

    If the path contains an HDU indicator (a number in brackets before the
    dot, e.g. 'foo.fits[1]'), it is stripped before searching, so the path
    will match filenames without the HDU indicator, e.g. 'foo.fits'.

    Parameters
    ----------
    path : string
        A filename (and optionally prefix path) to search for within root.

    Returns
    -------
    bool
        True if the path was found, else False.
    """
    strippedPath = path
    if strippedPath.endswith(']'):
        strippedPath = strippedPath[:strippedPath.rfind('[')]
    location = dafPersist.ButlerLocation(pythonType=None,
                                         cppType=None,
                                         storageName=None,
                                         locationList=[strippedPath],
                                         dataId={},
                                         mapper=None,
                                         storage=None)
    return bool(self.exists(location))
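
The HDU-indicator handling above is the easy-to-miss part. Here is a minimal, self-contained sketch of the same stripping logic (the helper name is hypothetical, not part of the source):

def stripHduIndicator(path):
    """Drop a trailing '[n]' HDU indicator: 'foo.fits[1]' -> 'foo.fits'."""
    if path.endswith(']'):
        return path[:path.rfind('[')]
    return path

assert stripHduIndicator('foo.fits[1]') == 'foo.fits'
assert stripHduIndicator('foo.fits') == 'foo.fits'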
Example #2
def map_x(self, dataId, write):
    path = "foo%(ccd)d.pickle" % dataId
    if not write:
        path = "parent/" + path
    return dafPersist.ButlerLocation(
        "lsst.afw.image.BBox", "lsst::afw::image::BBox", "PickleStorage",
        path, {}, self, dafPersist.Storage.makeFromURI(os.getcwd()))

def map_str(self, dataId, write):
    path = os.path.join(self.root, 'raw')
    path = os.path.join(path, 'raw_v' + str(dataId['str']) + '_f' +
                        dataId['filter'] + '.fits.gz')
    if os.path.exists(path):
        return dp.ButlerLocation(str, None, 'PickleStorage', path, dataId,
                                 self, self.storage)
    return None
    @classmethod
    def putRepositoryCfg(cls, cfg, loc=None):
        """Serialize a RepositoryCfg to a location.

        When loc == cfg.root, the RepositoryCfg is to be written at the root
        location of the repository. In that case, root is not written, it is
        implicit in the location of the cfg. This allows the cfg to move from
        machine to machine without modification.

        Parameters
        ----------
        cfg : RepositoryCfg instance
            The RepositoryCfg to be serialized.
        loc : string, optional
            The URI location (can be a relative path) at which to write the
            RepositoryCfg. If loc is None, the location is taken from the
            root attribute of cfg.

        Returns
        -------
        None
        """
        storage = dafPersist.Storage.makeFromURI(
            cfg.root if loc is None else loc, create=True)
        location = dafPersist.ButlerLocation(
            pythonType=dafPersist.RepositoryCfg,
            cppType=None,
            storageName=None,
            locationList=None,
            dataId={},
            mapper=None,
            storage=storage,
            usedDataId=None,
            datasetType=None)
        storage.write(location, cfg)
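
A hedged usage sketch of the two cases the docstring describes, modeled on the SwiftStorage test near the end of this section (the storage instance and paths are placeholders):

# Written at its own root: root stays implicit in the persisted cfg.
cfg = dafPersist.RepositoryCfg.makeFromArgs(
    dafPersist.RepositoryArgs(root='/data/repo',
                              mapper='lsst.obs.base.CameraMapper'),
    parents=None)
storage.putRepositoryCfg(cfg)

# Written somewhere other than its root: root is recorded in the cfg file.
storage.putRepositoryCfg(cfg, loc='/data/cfgs')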
Example #5
def map_raw(self, dataId, write):
    python = 'astropy.io.fits.HDUList'
    persistable = None
    storage = 'FitsStorage'
    path = 'butlerAlias/data/input/raw/raw_v' + \
        str(dataId['visit']) + '_f' + dataId['filter'] + '.fits.gz'
    return dafPersist.ButlerLocation(python, persistable, storage, path,
                                     dataId, self,
                                     dafPersist.Storage.makeFromURI(ROOT))

def map_obj(self, dataId, write):
    loc = dafPersist.ButlerLocation(pythonType=MyTestObject,
                                    cppType=None,
                                    storageName=None,
                                    locationList=['testname'],
                                    dataId={},
                                    mapper=self,
                                    storage=self.storage)
    return loc
Example #7
def map_table(self, dataId, write):
    loc = dafPersist.ButlerLocation(pythonType=lsst.afw.table.BaseCatalog,
                                    cppType=None,
                                    storageName=None,
                                    locationList=['testname'],
                                    dataId={},
                                    mapper=self,
                                    storage=self.rootStorage)
    return loc

def map_pickled(self, dataId, write):
    python = 'dict'
    persistable = None
    storage = 'PickleStorage'
    path = os.path.join(self.root, 'raw')
    path = os.path.join(path, 'pickled_v' + str(dataId['visit']) + '_f' +
                        dataId['filter'] + '.fits.gz')
    if write or os.path.exists(path):
        return dp.ButlerLocation(python, persistable, storage, path,
                                 dataId, self, self.storage)

def map_raw(self, dataId, write):
    python = 'astropy.io.fits.HDUList'
    persistable = None
    storage = 'PickleStorage'
    path = os.path.join(self.root, 'raw')
    path = os.path.join(path, 'raw_v' + str(dataId['visit']) + '_f' +
                        dataId['filter'] + '.fits.gz')
    if os.path.exists(path):
        return dp.ButlerLocation(python, persistable, storage, path,
                                 dataId, self, self.storage)
    return None

def map_str(self, dataId, write):
    template = "strfile_%(strId)s.pickle"
    path = template % dataId
    if not write:
        if not self.storage.exists(path):
            return None
    location = self.storage.locationWithRoot(path)
    return dp.ButlerLocation(pythonType=PosixPickleStringHanlder,
                             cppType=None,
                             storageName='PickleStorage',
                             locationList=location,
                             dataId=dataId,
                             mapper=self,
                             storage=self.storage)
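
The mappers above share a read/write asymmetry: for writes the file may not exist yet, so map_pickled returns a location anyway, while for reads a missing file maps to None. A standalone sketch of that guard (the function name and paths are invented for illustration):

import os

def mapGuard(path, write):
    if write or os.path.exists(path):
        return path  # stand-in for constructing a ButlerLocation
    return None

assert mapGuard('/no/such/file', write=True) == '/no/such/file'
assert mapGuard('/no/such/file', write=False) is None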
Example #11
    def map_defects(self, dataId, write=False):
        """Map defects dataset.

        Returns
        -------
        `lsst.daf.persistence.ButlerLocation`
            Minimal ButlerLocation containing just the locationList field
            (just enough information that bypass_defects can use it).
        """
        defectFitsPath = self._defectLookup(dataId=dataId)
        if defectFitsPath is None:
            raise RuntimeError("No defects available for dataId=%s" % (dataId,))

        return dafPersist.ButlerLocation(None, None, None, defectFitsPath,
                                         dataId, self,
                                         storage=self.rootStorage)
Example #12
    def instanceSearch(self, path):
        """Search for the given path in this storage instance.

        In this StorageInterface `path` is the name of a table within the connected database.

        Parameters
        ----------
        path : string
            The name of a table within the connected database.

        Returns
        -------
        bool
            True if the table exists, else False.

        """
        location = dafPersist.ButlerLocation(pythonType=None, cppType=None, storageName=None,
                                             locationList=[path], dataId={}, mapper=None, storage=None)
        return bool(self.exists(location))
def test_copy(self):
    repoLocation = self._getS3URI('test_copy')
    storage = S3Storage(uri=repoLocation, create=True)
    loc = dafPersist.ButlerLocation(pythonType=MyTestObject,
                                    cppType=None,
                                    storageName=None,
                                    locationList=['testname'],
                                    dataId={},
                                    mapper=self,
                                    storage=storage)
    testObj = MyTestObject('foo')
    storage.write(loc, testObj)
    storage.copyFile('testname', 'testname_copy')
    reloadedObj = storage.read(loc)
    self.assertEqual(testObj, reloadedObj[0])
    loc.locationList = ['testname_copy']
    copiedObj = storage.read(loc)
    self.assertEqual(testObj, copiedObj[0])
Example #14
    @classmethod
    def getRepositoryCfg(cls, uri):
        """Get a persisted RepositoryCfg.

        Parameters
        ----------
        uri : string
            URI or path to a persisted RepositoryCfg.

        Returns
        -------
        RepositoryCfg instance or None
            The deserialized RepositoryCfg, or None if none exists at the
            given location.
        """
        storage = dafPersist.Storage.makeFromURI(uri)
        location = dafPersist.ButlerLocation(
            pythonType=dafPersist.RepositoryCfg,
            cppType=None,
            storageName=None,
            locationList=None,
            dataId={},
            mapper=None,
            storage=storage,
            usedDataId=None,
            datasetType=None)
        return storage.read(location)
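
A hedged round-trip sketch pairing this with putRepositoryCfg above (the storage instance and paths are placeholders; per the SwiftStorage test below, a missing cfg reads back as None):

cfg = dafPersist.RepositoryCfg.makeFromArgs(
    dafPersist.RepositoryArgs(root='/data/repo',
                              mapper='lsst.obs.base.CameraMapper'),
    parents=None)
storage.putRepositoryCfg(cfg)
assert storage.getRepositoryCfg('/data/repo') == cfg
assert storage.getRepositoryCfg('/data/empty') is None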
def map(self, datasetType, dataId, write=False):
    path = self.templates[datasetType] % dataId
    return dafPersist.ButlerLocation(
        None, None, "PickleStorage", path, {}, self,
        dafPersist.Storage.makeFromURI(self.root))
Example #16
    def _initMappings(self, policy, rootStorage=None, calibStorage=None, provided=None, use_default=True):
        """Initialize mappings
        For each of the dataset types that we want to be able to read, there are
        methods that can be created to support them:
        * map_<dataset> : determine the path for dataset
        * std_<dataset> : standardize the retrieved dataset
        * bypass_<dataset> : retrieve the dataset (bypassing the usual retrieval machinery)
        * query_<dataset> : query the registry
        Besides the dataset types explicitly listed in the policy, we create
        additional, derived datasets for additional conveniences, e.g., reading
        the header of an image, retrieving only the size of a catalog.
        Parameters
        ----------
        policy : `lsst.daf.persistence.Policy`
            Policy with per-camera defaults already merged
        rootStorage : `Storage subclass instance`
            Interface to persisted repository data.
        calibRoot : `Storage subclass instance`
            Interface to persisted calib repository data
        provided : `list` of `str`
            Keys provided by the mapper
        use_default : `bool`
            Load default camera mappings
        """
        # Sub-dictionaries (for exposure/calibration/dataset types)
        imgMappingPolicy = dafPersist.Policy(dafPersist.Policy.defaultPolicyFile(
            "obs_base", "ImageMappingDictionary.paf", "policy"))
        expMappingPolicy = dafPersist.Policy(dafPersist.Policy.defaultPolicyFile(
            "obs_base", "ExposureMappingDictionary.paf", "policy"))
        calMappingPolicy = dafPersist.Policy(dafPersist.Policy.defaultPolicyFile(
            "obs_base", "CalibrationMappingDictionary.paf", "policy"))
        dsMappingPolicy = dafPersist.Policy(dafPersist.Policy.defaultPolicyFile(
            "obs_base", "DatasetMappingDictionary.paf", "policy"))

        # Mappings
        mappingList = (
            ("images", imgMappingPolicy, ImageMapping),
            ("exposures", expMappingPolicy, ExposureMapping),
            ("calibrations", calMappingPolicy, CalibrationMapping),
            ("datasets", dsMappingPolicy, DatasetMapping)
        )
        self.mappings = dict()
        for name, defPolicy, cls in mappingList:
            if name in policy:
                datasets = policy[name]

                # Centrally-defined datasets
                defaultsPath = os.path.join(getPackageDir("obs_base"), "policy", name + ".yaml")
                if os.path.exists(defaultsPath) and use_default:
                    datasets.merge(dafPersist.Policy(defaultsPath))

                mappings = dict()
                setattr(self, name, mappings)
                for datasetType in datasets.names(True):
                    subPolicy = datasets[datasetType]
                    subPolicy.merge(defPolicy)

                    if not hasattr(self, "map_" + datasetType) and 'composite' in subPolicy:
                        def compositeClosure(dataId, write=False, mapper=None, mapping=None,
                                             subPolicy=subPolicy):
                            components = subPolicy.get('composite')
                            assembler = subPolicy['assembler'] if 'assembler' in subPolicy else None
                            disassembler = subPolicy['disassembler'] if 'disassembler' in subPolicy else None
                            python = subPolicy['python']
                            butlerComposite = dafPersist.ButlerComposite(assembler=assembler,
                                                                         disassembler=disassembler,
                                                                         python=python,
                                                                         dataId=dataId,
                                                                         mapper=self)
                            for name, component in components.items():
                                butlerComposite.add(id=name,
                                                    datasetType=component.get('datasetType'),
                                                    setter=component.get('setter', None),
                                                    getter=component.get('getter', None),
                                                    subset=component.get('subset', False),
                                                    inputOnly=component.get('inputOnly', False))
                            return butlerComposite
                        setattr(self, "map_" + datasetType, compositeClosure)
                        # for now at least, don't set up any other handling for this dataset type.
                        continue

                    if name == "calibrations":
                        mapping = cls(datasetType, subPolicy, self.registry, self.calibRegistry, calibStorage,
                                      provided=provided, dataRoot=rootStorage)
                    else:
                        mapping = cls(datasetType, subPolicy, self.registry, rootStorage, provided=provided)
                    self.keyDict.update(mapping.keys())
                    mappings[datasetType] = mapping
                    self.mappings[datasetType] = mapping
                    if not hasattr(self, "map_" + datasetType):
                        def mapClosure(dataId, write=False, mapper=weakref.proxy(self), mapping=mapping):
                            return mapping.map(mapper, dataId, write)
                        setattr(self, "map_" + datasetType, mapClosure)
                    if not hasattr(self, "query_" + datasetType):
                        def queryClosure(format, dataId, mapping=mapping):
                            return mapping.lookup(format, dataId)
                        setattr(self, "query_" + datasetType, queryClosure)
                    if hasattr(mapping, "standardize") and not hasattr(self, "std_" + datasetType):
                        def stdClosure(item, dataId, mapper=weakref.proxy(self), mapping=mapping):
                            return mapping.standardize(mapper, item, dataId)
                        setattr(self, "std_" + datasetType, stdClosure)

                    def setMethods(suffix, mapImpl=None, bypassImpl=None, queryImpl=None):
                        """Set convenience methods on CameraMapper"""
                        mapName = "map_" + datasetType + "_" + suffix
                        bypassName = "bypass_" + datasetType + "_" + suffix
                        queryName = "query_" + datasetType + "_" + suffix
                        if not hasattr(self, mapName):
                            setattr(self, mapName, mapImpl or getattr(self, "map_" + datasetType))
                        if not hasattr(self, bypassName):
                            if bypassImpl is None and hasattr(self, "bypass_" + datasetType):
                                bypassImpl = getattr(self, "bypass_" + datasetType)
                            if bypassImpl is not None:
                                setattr(self, bypassName, bypassImpl)
                        if not hasattr(self, queryName):
                            setattr(self, queryName, queryImpl or getattr(self, "query_" + datasetType))

                    # Filename of dataset
                    setMethods("filename", bypassImpl=lambda datasetType, pythonType, location, dataId:
                               [os.path.join(location.getStorage().root, p) for p in location.getLocations()])
                    # Metadata from FITS file
                    if subPolicy["storage"] == "FitsStorage":  # a FITS image
                        setMethods("md", bypassImpl=lambda datasetType, pythonType, location, dataId:
                                   readMetadata(location.getLocationsWithRoot()[0]))

                        # Add support for configuring FITS compression
                        addName = "add_" + datasetType
                        if not hasattr(self, addName):
                            setattr(self, addName, self.getImageCompressionSettings)

                        if name == "exposures":
                            setMethods("wcs", bypassImpl=lambda datasetType, pythonType, location, dataId:
                                       afwGeom.makeSkyWcs(readMetadata(location.getLocationsWithRoot()[0])))
                            setMethods("calib", bypassImpl=lambda datasetType, pythonType, location, dataId:
                                       afwImage.Calib(readMetadata(location.getLocationsWithRoot()[0])))
                            setMethods("visitInfo",
                                       bypassImpl=lambda datasetType, pythonType, location, dataId:
                                       afwImage.VisitInfo(readMetadata(location.getLocationsWithRoot()[0])))
                            setMethods("filter",
                                       bypassImpl=lambda datasetType, pythonType, location, dataId:
                                       afwImage.Filter(readMetadata(location.getLocationsWithRoot()[0])))
                            setMethods("detector",
                                       mapImpl=lambda dataId, write=False:
                                           dafPersist.ButlerLocation(
                                               pythonType="lsst.afw.cameraGeom.CameraConfig",
                                               cppType="Config",
                                               storageName="Internal",
                                               locationList="ignored",
                                               dataId=dataId,
                                               mapper=self,
                                               storage=None,
                                           ),
                                       bypassImpl=lambda datasetType, pythonType, location, dataId:
                                           self.camera[self._extractDetectorName(dataId)]
                                       )
                            setMethods("bbox", bypassImpl=lambda dsType, pyType, location, dataId:
                                       afwImage.bboxFromMetadata(
                                           readMetadata(location.getLocationsWithRoot()[0], hdu=1)))

                        elif name == "images":
                            setMethods("bbox", bypassImpl=lambda dsType, pyType, location, dataId:
                                       afwImage.bboxFromMetadata(
                                           readMetadata(location.getLocationsWithRoot()[0])))

                    if subPolicy["storage"] == "FitsCatalogStorage":  # a FITS catalog
                        setMethods("md", bypassImpl=lambda datasetType, pythonType, location, dataId:
                                   readMetadata(os.path.join(location.getStorage().root,
                                                             location.getLocations()[0]), hdu=1))

                    # Sub-images
                    if subPolicy["storage"] == "FitsStorage":
                        def mapSubClosure(dataId, write=False, mapper=weakref.proxy(self), mapping=mapping):
                            subId = dataId.copy()
                            del subId['bbox']
                            loc = mapping.map(mapper, subId, write)
                            bbox = dataId['bbox']
                            llcX = bbox.getMinX()
                            llcY = bbox.getMinY()
                            width = bbox.getWidth()
                            height = bbox.getHeight()
                            loc.additionalData.set('llcX', llcX)
                            loc.additionalData.set('llcY', llcY)
                            loc.additionalData.set('width', width)
                            loc.additionalData.set('height', height)
                            if 'imageOrigin' in dataId:
                                loc.additionalData.set('imageOrigin',
                                                       dataId['imageOrigin'])
                            return loc

                        def querySubClosure(key, format, dataId, mapping=mapping):
                            subId = dataId.copy()
                            del subId['bbox']
                            return mapping.lookup(format, subId)
                        setMethods("sub", mapImpl=mapSubClosure, queryImpl=querySubClosure)

                    if subPolicy["storage"] == "FitsCatalogStorage":
                        # Length of catalog
                        setMethods("len", bypassImpl=lambda datasetType, pythonType, location, dataId:
                                   readMetadata(os.path.join(location.getStorage().root,
                                                             location.getLocations()[0]),
                                                hdu=1).get("NAXIS2"))

                        # Schema of catalog
                        if not datasetType.endswith("_schema") and datasetType + "_schema" not in datasets:
                            setMethods("schema", bypassImpl=lambda datasetType, pythonType, location, dataId:
                                       afwTable.Schema.readFits(os.path.join(location.getStorage().root,
                                                                             location.getLocations()[0])))
Example #17
def map_badSourceHist(self, dataId, write):
    path = "badSourceHist%(ccd)d.pickle" % dataId
    return dafPersist.ButlerLocation(
        "lsst.afw.image.BBox", "lsst::afw::image::BBox", "PickleStorage",
        path, {}, self, dafPersist.Storage.makeFromURI(os.getcwd()))
Example #18
def map_x(self, dataId, write):
    path = "foo%(ccd)d.yaml" % dataId
    return dafPersist.ButlerLocation(
        "lsst.daf.base.PropertySet", "PropertySet", "YamlStorage", [path],
        dataId, self, dafPersist.Storage.makeFromURI(self.root))

def map_x(self, dataId, write):
    path = "foo%(ccd)d.pickle" % dataId
    return dafPersist.ButlerLocation(
        None, None, "PickleStorage", path, {}, self,
        dafPersist.Storage.makeFromURI(os.getcwd()))
Example #20
def map_p2(self, dataId, write):
    path = "p2%(ccd)d.pickle" % dataId
    path = os.path.join(self.root, self.outPath, path)
    return dafPersist.ButlerLocation(
        None, None, "PickleStorage", path, {}, self,
        dafPersist.Storage.makeFromURI(os.getcwd()))
Example #21
    def testSwiftStorage(self):
        """Verify that SwiftStorage implements all the StorageInterface
        functions."""
        storage = SwiftStorage(uri=self.uri, create=True)
        self.assertEqual(storage._containerName, self.container1Name)
        self.assertTrue(storage.containerExists())
        # Test containerExists by changing the container name so that it will
        # return false, and then put the name back.
        containerName = storage._containerName
        storage._containerName = "foo"
        self.assertFalse(storage.containerExists())
        storage._containerName = containerName

        testObject = dpTest.TestObject("abc")
        butlerLocation = dp.ButlerLocation(
            pythonType='lsst.daf.persistence.test.TestObject',
            cppType=None,
            storageName='PickleStorage',
            locationList='firstTestObject',
            dataId={},
            mapper=None,
            storage=storage)

        # Test writing an object to storage
        storage.write(butlerLocation, testObject)
        # Test getting a local copy of the file in storage.
        localFile = storage.getLocalFile('firstTestObject')
        # Test reading the file in a new object using the localFile's name, as
        # well as using the localFile handle directly.
        for f in (open(localFile.name, 'rb'), localFile):
            if sys.version_info.major >= 3:
                obj = pickle.load(f, encoding="latin1")
            else:
                obj = pickle.load(f)
            self.assertEqual(testObject, obj)
        # Test reading the butlerLocation, should return the object instance.
        reloadedObject = storage.read(butlerLocation)
        self.assertEqual(testObject, reloadedObject[0])
        # Test the 'exists' function with a string
        self.assertTrue(storage.exists('firstTestObject'))
        self.assertFalse(storage.exists('secondTestObject'))
        # Test the 'exists' function with a ButlerLocation. (note that most of
        # the butler location fields are unused in exists and so are set to
        # None here.)
        location = dp.ButlerLocation(pythonType=None,
                                     cppType=None,
                                     storageName=None,
                                     locationList=['firstTestObject'],
                                     dataId={},
                                     mapper=None,
                                     storage=None)
        self.assertTrue(storage.exists(location))
        location = dp.ButlerLocation(pythonType=None,
                                     cppType=None,
                                     storageName=None,
                                     locationList=['secondTestObject'],
                                     dataId={},
                                     mapper=None,
                                     storage=None)
        self.assertFalse(storage.exists(location))
        # Test the 'instanceSearch' function, with and without the fits header
        # extension
        self.assertEqual(storage.instanceSearch('firstTestObject'),
                         ['firstTestObject'])
        self.assertEqual(storage.instanceSearch('firstTestObject[1]'),
                         ['firstTestObject[1]'])
        self.assertEqual(storage.instanceSearch('first*Object'),
                         ['firstTestObject'])
        self.assertEqual(storage.instanceSearch('*TestObject[1]'),
                         ['firstTestObject[1]'])
        self.assertIsNone(storage.instanceSearch('secondTestObject'))
        self.assertIsNone(storage.instanceSearch('secondTestObject[1]'))
        # Test the 'search' function
        self.assertEqual(storage.search(self.uri, 'firstTestObject'),
                         ['firstTestObject'])
        # Test the copy function
        storage.copyFile('firstTestObject', 'secondTestObject')
        with self.assertRaises(RuntimeError):
            storage.copyFile('thirdTestObject', 'fourthTestObject')
        # Test locationWithRoot
        self.assertEqual(storage.locationWithRoot('firstTestObject'),
                         self.uri + '/' + 'firstTestObject')
        # Test getRepositoryCfg and putRepositoryCfg
        repositoryCfg = dp.RepositoryCfg.makeFromArgs(
            dp.RepositoryArgs(root=self.uri, mapper=TestMapper),
            parents=None)
        storage.putRepositoryCfg(repositoryCfg)
        reloadedRepoCfg = storage.getRepositoryCfg(self.uri)
        self.assertEqual(repositoryCfg, reloadedRepoCfg)
        # Test getting a non-existent RepositoryCfg
        self.assertIsNone(storage.getRepositoryCfg(self.uri2))
        # Test getting the mapper class from the repoCfg in the repo.
        mapper = SwiftStorage.getMapperClass(self.uri)
        self.assertEqual(mapper, TestMapper)
        # Test for a repoCfg that resides outside its repository; it has a
        # root that is not the same as its location.
        repositoryCfg = dp.RepositoryCfg.makeFromArgs(
            dp.RepositoryArgs(root='foo/bar/baz',
                              mapper='lsst.obs.base.CameraMapper'),
            parents=None)
        storage.putRepositoryCfg(repositoryCfg, loc=self.uri)
        reloadedRepoCfg = storage.getRepositoryCfg(self.uri)
        self.assertEqual(repositoryCfg, reloadedRepoCfg)

        storage.deleteContainer()
        self.assertFalse(storage.containerExists())