Example #1
def makeSimpleQGraph(nQuanta=5,
                     pipeline=None,
                     butler=None,
                     root=None,
                     skipExisting=False,
                     inMemory=True,
                     userQuery=""):
    """Make simple QuantumGraph for tests.

    Makes simple one-task pipeline with AddTask, sets up in-memory
    registry and butler, fills them with minimal data, and generates
    QuantumGraph with all of that.

    Parameters
    ----------
    nQuanta : `int`
        Number of quanta in a graph.
    pipeline : `~lsst.pipe.base.Pipeline`, optional
        If `None`, a one-task pipeline is made with `AddTask` and the
        default `AddTaskConfig`.
    butler : `~lsst.daf.butler.Butler`, optional
        Data butler instance, this should be an instance returned from a
        previous call to this method.
    root : `str`
        Path or URI to the root location of the new repository. Only used if
        ``butler`` is None.
    skipExisting : `bool`, optional
        If `True`, a Quantum is not created if all its outputs
        already exist.
    inMemory : `bool`, optional
        If `True`, make an in-memory repository.
    userQuery : `str`, optional
        The user query to pass to ``makeGraph``, by default an empty string.

    Returns
    -------
    butler : `~lsst.daf.butler.Butler`
        Butler instance
    qgraph : `~lsst.pipe.base.QuantumGraph`
        Quantum graph instance
    """

    if pipeline is None:
        pipeline = makeSimplePipeline(nQuanta=nQuanta)

    if butler is None:

        if root is None:
            raise ValueError("Must provide `root` when `butler` is None")

        config = Config()
        if not inMemory:
            config["registry", "db"] = f"sqlite:///{root}/gen3.sqlite"
            config["datastore", "cls"] = \
                "lsst.daf.butler.datastores.posixDatastore.PosixDatastore"
        repo = butlerTests.makeTestRepo(root, {}, config=config)
        collection = "test"
        butler = Butler(butler=repo, run=collection)

        # Add dataset types to registry
        registerDatasetTypes(butler.registry, pipeline.toExpandedPipeline())

        instrument = pipeline.getInstrument()
        if instrument is not None:
            if isinstance(instrument, str):
                instrument = doImport(instrument)
            instrumentName = instrument.getName()
        else:
            instrumentName = "INSTR"

        # Add all needed dimensions to registry
        butler.registry.insertDimensionData("instrument",
                                            dict(name=instrumentName))
        butler.registry.insertDimensionData(
            "detector", dict(instrument=instrumentName, id=0,
                             full_name="det0"))

        # Add inputs to butler
        data = numpy.array([0., 1., 2., 5.])
        butler.put(data, "add_dataset0", instrument=instrumentName, detector=0)

    # Make the graph
    builder = pipeBase.GraphBuilder(registry=butler.registry,
                                    skipExisting=skipExisting)
    qgraph = builder.makeGraph(pipeline,
                               collections=[butler.run],
                               run=butler.run,
                               userQuery=userQuery)

    return butler, qgraph
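
A minimal usage sketch (hedged): it assumes makeSimpleQGraph is importable as
defined above and that a throwaway directory is acceptable as the repository
root; the quanta count is arbitrary.

import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    # Build a small three-quantum graph in a scratch repository.
    butler, qgraph = makeSimpleQGraph(nQuanta=3, root=tmpdir)
    # A butler returned by a previous call can be passed back in to build
    # another graph against the same repository.
    _, qgraph2 = makeSimpleQGraph(nQuanta=3, butler=butler)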
Example #2
 def testConstructor(self):
     """Independent test of constructor.
     """
     butler = Butler(self.configFile)
     self.assertIsInstance(butler, Butler)
Example #3
    def testGetDatasetTypes(self):
        butler = Butler(self.tmpConfigFile)
        dimensions = butler.registry.dimensions.extract(
            ["instrument", "visit", "physical_filter"])
        dimensionEntries = (
            ("instrument", {"instrument": "DummyCam"}),
            ("instrument", {"instrument": "DummyHSC"}),
            ("instrument", {"instrument": "DummyCamComp"}),
            ("physical_filter", {"instrument": "DummyCam",
                                 "physical_filter": "d-r"}),
            ("visit", {"instrument": "DummyCam", "visit": 42,
                       "physical_filter": "d-r"}),
        )
        storageClass = self.storageClassFactory.getStorageClass(
            "StructuredData")
        # Add needed Dimensions
        for name, value in dimensionEntries:
            butler.registry.addDimensionEntry(name, value)

        # When a DatasetType is added to the registry entries are created
        # for each component. Need entries for each component in the test
        # configuration otherwise validation won't work. The ones that
        # are deliberately broken will be ignored later.
        datasetTypeNames = {"metric", "metric2", "metric4", "metric33", "pvi"}
        components = set()
        for datasetTypeName in datasetTypeNames:
            # Create and register a DatasetType
            self.addDatasetType(datasetTypeName, dimensions, storageClass,
                                butler.registry)

            for componentName in storageClass.components:
                components.add(
                    DatasetType.nameWithComponent(datasetTypeName,
                                                  componentName))

        fromRegistry = butler.registry.getAllDatasetTypes()
        self.assertEqual({d.name for d in fromRegistry},
                         datasetTypeNames | components)

        # Now that we have some dataset types registered, validate them
        butler.validateConfiguration(ignore=[
            "test_metric_comp", "metric3", "calexp", "DummySC",
            "datasetType.component"
        ])

        # Add a new datasetType that will fail template validation
        self.addDatasetType("test_metric_comp", dimensions, storageClass,
                            butler.registry)
        if self.validationCanFail:
            with self.assertRaises(ValidationError):
                butler.validateConfiguration()

        # Rerun validation but with a subset of dataset type names
        butler.validateConfiguration(datasetTypeNames=["metric4"])

        # Rerun validation but ignore the bad datasetType
        butler.validateConfiguration(ignore=[
            "test_metric_comp", "metric3", "calexp", "DummySC",
            "datasetType.component"
        ])
Example #4
            contents.saveDimensionData(
                "visit_definition",
                src_repo.registry.queryDimensionRecords("visit_definition"))
            contents.saveDimensionData(
                "visit_detector_region",
                src_repo.registry.queryDimensionRecords(
                    "visit_detector_region"))
            # runs included automatically by saveDatasets
        dest_repo.import_(directory=src_dir,
                          filename=export_file.name,
                          transfer="copy")


########################################
# Put everything together

logging.info("Creating temporary repository...")
with tempfile.TemporaryDirectory() as workspace:
    temp_repo = _make_repo_with_instruments(workspace,
                                            _get_instruments(DEST_DIR))
    logging.info("Ingesting raws...")
    _ingest_raws(temp_repo, RAW_DIR, RAW_RUN)
    logging.info("Downloading ephemerides...")
    _get_ephem(workspace, RAW_RUN, DEST_RUN)
    temp_repo.registry.refresh()  # Pipeline added dataset types
    preloaded = Butler(DEST_DIR, writeable=True)
    logging.info("Transferring ephemerides to dataset...")
    _transfer_ephems(EPHEM_DATASET, temp_repo, workspace, DEST_RUN, preloaded)

logging.info("Solar system catalogs copied to %s:%s", DEST_DIR, DEST_RUN)
Example #5
# Build a parser for command line arguments
parser = argparse.ArgumentParser(
    description="Make a SkyMap and add it to a gen3 repository.")
parser.add_argument("butler",
                    metavar="Butler",
                    type=str,
                    help="Path to a gen3 butler")
parser.add_argument(
    "collection",
    type=str,
    metavar="Collection",
    help="Name of the Butler collection the SkyMap should be inserted into")
parser.add_argument("-C",
                    "--config-file",
                    dest="configFile",
                    help="Path to a config file overrides file")

args = parser.parse_args()

config = MakeGen3SkyMapConfig()
if args.configFile:
    if not os.path.exists(args.configFile):
        print("Path to config file specified does not exist")
        sys.exit(1)
    config.load(args.configFile)

# Construct the SkyMap Creation task and run it
skyMapTask = MakeGen3SkyMapTask(config=config)
butler = Butler(args.butler, run=args.collection)
skyMapTask.run(butler)
Example #6
class Gen2ConvertTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        self.butler = Butler(REPO_ROOT, run="shared/ci_hsc")

    def tearDown(self):
        del self.butler

    def testImpliedDimensions(self):
        """Test that implied dimensions are expanded properly when populating
        the Dataset table.
        """
        # All of the dataset types below have Visit or Exposure in their
        # dimensions, which means PhysicalFilter and AbstractFilter are
        # implied dimensions for them.  Those should be non-null and
        # consistent.
        sql = """
            SELECT physical_filter, abstract_filter
            FROM dataset
            WHERE dataset_type_name IN (
                'raw', 'calexp', 'icExp', 'src', 'icSrc',
                'deepCoadd_directWarp', 'deepCoadd_psfMatchedWarp'
            )
            """
        count = 0
        for row in self.butler.registry.query(sql):
            if row["physical_filter"] == "HSC-R":
                self.assertEqual(row["abstract_filter"], "r")
            elif row["physical_filter"] == "HSC-I":
                self.assertEqual(row["abstract_filter"], "i")
            else:
                self.fail("physical_filter not in ('HSC-R', 'HSC-I')")
            count += 1
        self.assertGreater(count, 0)

    def testObservationPacking(self):
        """Test that packing Visit+Detector into an integer in Gen3 generates
        the same results as in Gen2.
        """
        butler2 = Butler2(os.path.join(REPO_ROOT, "rerun", "ci_hsc"))
        for visit, detector in [(903334, 16), (903338, 25), (903986, 100)]:
            dataId2 = {"visit": visit, "ccd": detector}
            dataId3 = self.butler.registry.expandDataId(visit=visit,
                                                        detector=detector,
                                                        instrument="HSC")
            self.assertEqual(
                butler2.get("ccdExposureId", dataId2),
                self.butler.registry.packDataId("visit_detector", dataId3))

    def testSkyMapPacking(self):
        """Test that packing Tract+Patch into an integer in Gen3 works and is
        self-consistent.

        Note that this packing does *not* use the same algorithm as Gen2 and
        hence generates different IDs, because the Gen2 algorithm is
        problematically tied to the *default* SkyMap for a particular camera,
        rather than the SkyMap actually used.
        """
        # SkyMap used by ci_hsc has only one tract, so the test coverage in
        # that area isn't great.  That's okay because that's tested in SkyMap;
        # what we care about here is that the converted repo has the necessary
        # metadata to construct and use these packers at all.
        for patch in [0, 43, 52]:
            dataId = self.butler.registry.expandDataId(skymap="ci_hsc",
                                                       tract=0,
                                                       patch=patch,
                                                       abstract_filter='r')
            packer1 = self.butler.registry.makeDataIdPacker(
                "tract_patch", dataId)
            packer2 = self.butler.registry.makeDataIdPacker(
                "tract_patch_abstract_filter", dataId)
            self.assertNotEqual(packer1.pack(dataId), packer2.pack(dataId))
            self.assertEqual(
                packer1.unpack(packer1.pack(dataId)),
                DataId(dataId, dimensions=packer1.dimensions.required))
            self.assertEqual(packer2.unpack(packer2.pack(dataId)), dataId)
            self.assertEqual(packer1.pack(dataId, abstract_filter='i'),
                             packer1.pack(dataId))
            self.assertNotEqual(packer2.pack(dataId, abstract_filter='i'),
                                packer2.pack(dataId))

    def testRawFilters(self):
        """Test that raw data has the Filter component set.
        """
        # Note that the 'r' and 'i' values here look like Gen3 abstract_filter
        # values, but they're something weird in between abstract and physical
        # filters; if we had HSC-R2 data, the corresponding value would be
        # 'r2', not just 'r'.  We need that to be compatible with Gen2 usage
        # of the afw.image.Filter system.
        rawR = self.butler.get("raw",
                               instrument="HSC",
                               exposure=903334,
                               detector=16)
        self.assertEqual(rawR.getFilter().getName(), "r")
        rawI = self.butler.get("raw",
                               instrument="HSC",
                               exposure=903986,
                               detector=16)
        self.assertEqual(rawI.getFilter().getName(), "i")

    def testCuratedCalibrations(self):
        """Test that defects, the camera, and the brighter-fatter kernel were
        added to the Gen3 registry.
        """
        originInfo = DatasetOriginInfoDef(["raw", "calib"], [])
        # Query for raws that have associated calibs of the types below;
        # result is an iterator over rows that correspond roughly to data IDs.
        rowsWithCalibs = list(
            self.butler.registry.selectMultipleDatasetTypes(
                originInfo,
                expression="",
                required=["raw", "camera", "bfKernel", "defects"],
                perDatasetTypeDimensions=["calibration_label"]))
        # Query for all rows, with no restriction on having associated calibs.
        rowsWithoutCalibs = list(
            self.butler.registry.selectMultipleDatasetTypes(
                originInfo,
                expression="",
                required=["raw"],
            ))
        # We should get the same raws in both cases because all of the raws
        # here should have associated calibs.
        self.assertGreater(len(rowsWithoutCalibs), 0)
        self.assertEqual(len(rowsWithCalibs), len(rowsWithoutCalibs))
        # Try getting those calibs to make sure the files themselves are
        # where the Butler thinks they are.
        butler = Butler(REPO_ROOT, run="calib")
        instrument = HyperSuprimeCam()
        for row in rowsWithCalibs:
            refsByName = {k.name: v for k, v in row.datasetRefs.items()}
            cameraFromButler = butler.get(refsByName["camera"])
            cameraFromInstrument = instrument.getCamera()
            self.assertEqual(len(cameraFromButler), len(cameraFromInstrument))
            self.assertEqual(cameraFromButler.getName(),
                             cameraFromInstrument.getName())
            self.assertFloatsEqual(butler.get(refsByName["bfKernel"]),
                                   instrument.getBrighterFatterKernel())
            defects = butler.get(refsByName["defects"])
            self.assertIsInstance(defects, lsst.meas.algorithms.Defects)

    def testBrightObjectMasks(self):
        """Test that bright object masks are included in the Gen3 repo.
        """
        regions = self.butler.get("brightObjectMask",
                                  skymap='ci_hsc',
                                  tract=0,
                                  patch=69,
                                  abstract_filter='r')
        self.assertIsInstance(regions, ObjectMaskCatalog)
        self.assertGreater(len(regions), 0)
Example #7
#!/usr/bin/env python

from lsst.daf.butler import Butler, CollectionType
butler = Butler('RSP_CHECK_REPO', writeable=True)
butler.registry.registerCollection('HSC/defaults', CollectionType.CHAINED,
                                   'A CHAINED collection to package up the collections to be queried')
colls = [el for el in butler.registry.queryCollections() if el != 'HSC/defaults']
butler.registry.setCollectionChain('HSC/defaults', colls)
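
A hedged follow-up: once the CHAINED collection exists, a Butler constructed
with it as the default collection searches all of its children; the dataset
type name below is only illustrative.

butler = Butler('RSP_CHECK_REPO', collections='HSC/defaults')
# 'raw' is a placeholder dataset type name; any type registered in the child
# collections can be queried through the chain.
refs = list(butler.registry.queryDatasets('raw', collections='HSC/defaults'))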
Example #8
class HscIngestTestCase(lsst.utils.tests.TestCase):
    def setUp(self):
        # Use a temporary working directory
        self.root = tempfile.mkdtemp(dir=TESTDIR)
        Butler.makeRepo(self.root)
        self.butler = Butler(self.root, run="raw")
        # Register the instrument and its static metadata
        HyperSuprimeCam().register(self.butler.registry)
        # Make a default config for test methods to play with
        self.config = RawIngestTask.ConfigClass()
        self.config.onError = "break"
        self.file = os.path.join(testDataDirectory, "hsc", "raw",
                                 "HSCA90402512.fits.gz")
        self.dataId = dict(instrument="HSC", exposure=904024, detector=50)

    def tearDown(self):
        if os.path.exists(self.root):
            shutil.rmtree(self.root, ignore_errors=True)

    def runIngest(self, files=None):
        if files is None:
            files = [self.file]
        task = RawIngestTask(config=self.config, butler=self.butler)
        task.log.setLevel(
            task.log.FATAL)  # silence logs, since we expect a lot of warnings
        task.run(files)

    def runIngestTest(self, files=None):
        self.runIngest(files)
        exposure = self.butler.get("raw", self.dataId)
        metadata = self.butler.get("raw.metadata", self.dataId)
        image = self.butler.get("raw.image", self.dataId)
        self.assertImagesEqual(exposure.image, image)
        self.assertEqual(metadata.toDict(), exposure.getMetadata().toDict())

    def testSymLink(self):
        self.config.transfer = "symlink"
        self.runIngestTest()

    def testCopy(self):
        self.config.transfer = "copy"
        self.runIngestTest()

    def testHardLink(self):
        self.config.transfer = "hardlink"
        self.runIngestTest()

    def testInPlace(self):
        # hardlink into repo root manually
        newPath = os.path.join(self.butler.datastore.root,
                               os.path.basename(self.file))
        os.link(self.file, newPath)
        self.config.transfer = None
        self.runIngestTest([newPath])

    def testOnConflictFail(self):
        self.config.transfer = "symlink"
        self.config.conflict = "fail"
        self.runIngest()  # this one should succeed
        with self.assertRaises(Exception):
            self.runIngest()  # this one should fail

    def testOnConflictIgnore(self):
        self.config.transfer = "symlink"
        self.config.conflict = "ignore"
        self.runIngest()  # this one should succeed
        n1, = self.butler.registry.query("SELECT COUNT(*) FROM Dataset")
        self.runIngest()  # this one should silently fail
        n2, = self.butler.registry.query("SELECT COUNT(*) FROM Dataset")
        self.assertEqual(n1, n2)

    def testOnConflictStash(self):
        self.config.transfer = "symlink"
        self.config.conflict = "ignore"
        self.config.stash = "stash"
        self.runIngest()  # this one should write to 'raw'
        self.runIngest()  # this one should write to 'stash'
        dt = self.butler.registry.getDatasetType("raw.metadata")
        ref1 = self.butler.registry.find(self.butler.collection, dt,
                                         self.dataId)
        ref2 = self.butler.registry.find("stash", dt, self.dataId)
        self.assertNotEqual(ref1.id, ref2.id)
        self.assertEqual(
            self.butler.get(ref1).toDict(),
            self.butler.getDirect(ref2).toDict())

    def testOnErrorBreak(self):
        self.config.transfer = "symlink"
        self.config.onError = "break"
        # Failing to ingest this nonexistent file after ingesting the valid one should
        # leave the valid one in the registry, despite raising an exception.
        with self.assertRaises(Exception):
            self.runIngest(files=[self.file, "nonexistent.fits"])
        dt = self.butler.registry.getDatasetType("raw.metadata")
        self.assertIsNotNone(
            self.butler.registry.find(self.butler.collection, dt, self.dataId))

    def testOnErrorContinue(self):
        self.config.transfer = "symlink"
        self.config.onError = "continue"
        # Failing to ingest nonexistent files before and after ingesting the
        # valid one should leave the valid one in the registry and not raise
        # an exception.
        self.runIngest(
            files=["nonexistent.fits", self.file, "still-not-here.fits"])
        dt = self.butler.registry.getDatasetType("raw.metadata")
        self.assertIsNotNone(
            self.butler.registry.find(self.butler.collection, dt, self.dataId))

    def testOnErrorRollback(self):
        self.config.transfer = "symlink"
        self.config.onError = "rollback"
        # Failing to ingest nonexistent files after ingesting the
        # valid one should leave the registry empty.
        with self.assertRaises(Exception):
            self.runIngest(files=[self.file, "nonexistent.fits"])
        try:
            dt = self.butler.registry.getDatasetType("raw.metadata")
        except KeyError:
            # If we also rollback registering the DatasetType, that's fine,
            # but not required.
            pass
        else:
            self.assertIsNotNone(
                self.butler.registry.find(self.butler.collection, dt,
                                          self.dataId))
Example #9
 def setUp(self):
     self.root = tempfile.mkdtemp(dir=TESTDIR)
     Butler.makeRepo(self.root)
     # Create a random image for testing
     self.rng = Random(self.RANDOM_SEED)
Example #10
class ParquetFormatterTestCase(unittest.TestCase):
    """Tests for ParquetFormatter, using PosixDatastore.
    """
    def setUp(self):
        """Create a new butler root for each test."""
        self.root = tempfile.mkdtemp(dir=TESTDIR)
        Butler.makeRepo(self.root)
        self.butler = Butler(self.root, run="test_run")
        # No dimensions in dataset type so we don't have to worry about
        # inserting dimension data or defining data IDs.
        self.datasetType = DatasetType(
            "data",
            dimensions=(),
            storageClass="DataFrame",
            universe=self.butler.registry.dimensions)
        self.butler.registry.registerDatasetType(self.datasetType)

    def tearDown(self):
        if os.path.exists(self.root):
            shutil.rmtree(self.root, ignore_errors=True)

    def testSingleIndexDataFrame(self):
        columns1 = pd.Index(["a", "b", "c"])
        df1 = pd.DataFrame(np.random.randn(5, 3),
                           index=np.arange(5, dtype=int),
                           columns=columns1)
        self.butler.put(df1, self.datasetType, dataId={})
        # Read the whole DataFrame.
        df2 = self.butler.get(self.datasetType, dataId={})
        self.assertTrue(df1.equals(df2))
        # Read just the column descriptions.
        columns2 = self.butler.get(f"{self.datasetType.name}.columns",
                                   dataId={})
        self.assertTrue(df1.columns.equals(columns2))
        # Read just some columns a few different ways.
        df3 = self.butler.get(self.datasetType,
                              dataId={},
                              parameters={"columns": ["a", "c"]})
        self.assertTrue(df1.loc[:, ["a", "c"]].equals(df3))
        df4 = self.butler.get(self.datasetType,
                              dataId={},
                              parameters={"columns": "a"})
        self.assertTrue(df1.loc[:, ["a"]].equals(df4))
        # Passing an unrecognized column should be a ValueError.
        with self.assertRaises(ValueError):
            self.butler.get(self.datasetType,
                            dataId={},
                            parameters={"columns": ["d"]})

    def testMultiIndexDataFrame(self):
        columns1 = pd.MultiIndex.from_tuples(
            [
                ("g", "a"),
                ("g", "b"),
                ("g", "c"),
                ("r", "a"),
                ("r", "b"),
                ("r", "c"),
            ],
            names=["filter", "column"],
        )
        df1 = pd.DataFrame(np.random.randn(5, 6),
                           index=np.arange(5, dtype=int),
                           columns=columns1)
        self.butler.put(df1, self.datasetType, dataId={})
        # Read the whole DataFrame.
        df2 = self.butler.get(self.datasetType, dataId={})
        self.assertTrue(df1.equals(df2))
        # Read just the column descriptions.
        columns2 = self.butler.get(f"{self.datasetType.name}.columns",
                                   dataId={})
        self.assertTrue(df1.columns.equals(columns2))
        # Read just some columns a few different ways.
        df3 = self.butler.get(self.datasetType, dataId={},
                              parameters={"columns": {"filter": "g"}})
        self.assertTrue(df1.loc[:, ["g"]].equals(df3))
        df4 = self.butler.get(self.datasetType, dataId={},
                              parameters={"columns": {"filter": ["r"],
                                                      "column": "a"}})
        self.assertTrue(df1.loc[:, [("r", "a")]].equals(df4))
        # Passing an unrecognized column should be a ValueError.
        with self.assertRaises(ValueError):
            self.butler.get(self.datasetType,
                            dataId={},
                            parameters={"columns": ["d"]})
Example #11
class IngestTestBase(metaclass=abc.ABCMeta):
    """Base class for tests of gen3 ingest. Subclass from this, then
    `unittest.TestCase` to get a working test suite.
    """

    ingestDir = ""
    """Root path to ingest files into. Typically `obs_package/tests/`; the
    actual directory will be a tempdir under this one.
    """

    instrument = None
    """The instrument to be registered and tested."""

    dataIds = []
    """list of butler data IDs of files that should have been ingested."""

    file = ""
    """Full path to a file to ingest in tests."""

    RawIngestTask = lsst.obs.base.RawIngestTask
    """The task to use in the Ingest test."""
    def setUp(self):
        # Use a temporary working directory
        self.root = tempfile.mkdtemp(dir=self.ingestDir)
        Butler.makeRepo(self.root)
        self.butler = Butler(self.root, run="raw")

        # Register the instrument and its static metadata
        self.instrument.register(self.butler.registry)

        # Make a default config for test methods to play with
        self.config = self.RawIngestTask.ConfigClass()
        self.config.instrument = \
            f"{self.instrument.__class__.__module__}.{self.instrument.__class__.__name__}"

    def tearDown(self):
        if os.path.exists(self.root):
            shutil.rmtree(self.root, ignore_errors=True)

    def runIngest(self, files=None):
        """
        Initialize and run RawIngestTask on a list of files.

        Parameters
        ----------
        files : `list` [`str`], or None
            List of files to be ingested, or None to use ``self.file``
        """
        if files is None:
            files = [self.file]
        task = self.RawIngestTask(config=self.config, butler=self.butler)
        task.log.setLevel(
            task.log.FATAL)  # silence logs, since we expect a lot of warnings
        task.run(files)

    def runIngestTest(self, files=None):
        """
        Test that RawIngestTask ingested the expected files.

        Parameters
        ----------
        files : `list` [`str`], or None
            List of files to be ingested, or None to use ``self.file``
        """
        self.runIngest(files)
        datasets = self.butler.registry.queryDatasets('raw', collections=...)
        self.assertEqual(len(list(datasets)), len(self.dataIds))
        for dataId in self.dataIds:
            exposure = self.butler.get("raw", dataId)
            metadata = self.butler.get("raw.metadata", dataId)
            # only check the metadata, not the images, to speed up tests
            self.assertEqual(metadata.toDict(),
                             exposure.getMetadata().toDict())
            self.checkRepo(files=files)

    def checkRepo(self, files=None):
        """Check the state of the repository after ingest.

        This is an optional hook provided for subclasses; by default it does
        nothing.

        Parameters
        ----------
        files : `list` [`str`], or None
            List of files to be ingested, or None to use ``self.file``
        """
        pass

    def testSymLink(self):
        self.config.transfer = "symlink"
        self.runIngestTest()

    def testCopy(self):
        self.config.transfer = "copy"
        self.runIngestTest()

    def testHardLink(self):
        self.config.transfer = "hardlink"
        try:
            self.runIngestTest()
        except PermissionError as err:
            raise unittest.SkipTest(
                "Skipping hard-link test because input data"
                " is on a different filesystem.") from err

    def testInPlace(self):
        """Test that files already in the directory can be added to the
        registry in-place.
        """
        # symlink into repo root manually
        newPath = os.path.join(self.butler.datastore.root,
                               os.path.basename(self.file))
        os.symlink(os.path.abspath(self.file), newPath)
        self.config.transfer = None
        self.runIngestTest([newPath])

    def testFailOnConflict(self):
        """Re-ingesting the same data into the repository should fail.
        """
        self.config.transfer = "symlink"
        self.runIngest()
        with self.assertRaises(Exception):
            self.runIngest()
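
A hedged sketch of a concrete subclass, following the class docstring above;
the instrument and data ID values are borrowed from the HSC example earlier,
and the file path is a placeholder rather than a real obs-package location.

class HscIngestBaseTestCase(IngestTestBase, lsst.utils.tests.TestCase):
    ingestDir = os.path.dirname(__file__)
    instrument = HyperSuprimeCam()  # as registered in the HSC example above
    file = "/path/to/raw/HSCA90402512.fits.gz"  # placeholder path
    dataIds = [dict(instrument="HSC", exposure=904024, detector=50)]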
Example #12
                        dest="logLevel",
                        default=lsst.log.Log.INFO,
                        const=lsst.log.Log.DEBUG,
                        help="Set the log level to DEBUG.")

    args = parser.parse_args()
    log = lsst.log.Log.getLogger("lsst.daf.butler")
    log.setLevel(args.logLevel)

    # Forward python logging to lsst logger
    lgr = logging.getLogger("lsst.daf.butler")
    lgr.setLevel(logging.INFO if args.logLevel ==
                 lsst.log.Log.INFO else logging.DEBUG)
    lgr.addHandler(lsst.log.LogHandler())

    butler = Butler(args.root, collections=["HSC/calib"])

    def rewrite(dataset: FileDataset) -> FileDataset:
        # Join the datastore root to the exported path.  This should yield
        # absolute paths that start with $CI_HSC_GEN2_DIR.
        dataset.path = os.path.join(butler.datastore.root.ospath, dataset.path)
        # Remove symlinks in the path; this should result in absolute paths
        # that start with $TESTDATA_CI_HSC_DIR, because ci_hsc_gen2 always
        # symlinks these datasets from there.
        dataset.path = os.path.realpath(dataset.path)
        # Recompute the path relative to $TESTDATA_CI_HSC_DIR, so we can deal
        # with that moving around after the export file is created.
        dataset.path = os.path.relpath(dataset.path,
                                       getPackageDir("testdata_ci_hsc"))
        return dataset
#!/usr/bin/env python

from lsst.pipe.base import QuantumGraph
from lsst.daf.butler import DimensionUniverse, Butler
butler = Butler('/repo/main')
du = DimensionUniverse()
qgraph = QuantumGraph.loadUri('/home/krughoff/public_html/data/two_ccd_processccd.qgraph', du)
exports = set()
def runner(nodes, exports, visited=None):
    if not visited:
        visited = set()
    for node in nodes:
        if node in visited:
            continue
        exports.update([ref for thing in node.quantum.inputs.values() for ref in thing])
        exports.update([ref for thing in node.quantum.outputs.values() for ref in thing])
        exports.update([ref for ref in node.quantum.initInputs.values()])
        before = qgraph.determineAncestorsOfQuantumNode(node)
        visited.add(node)
        if before:
            runner(before, exports, visited)
runner([node for node in qgraph.getNodesForTask(qgraph.findTaskDefByLabel('calibrate'))], exports)
resolved_refs = [butler.registry.findDataset(datasetType=ex.datasetType, dataId=ex.dataId,
                 collections=butler.registry.queryCollections()) for ex in exports]

with butler.export(filename='export.yaml', directory='rsp_data_export', transfer='copy') as export:
    export.saveDatasets(resolved_refs)
    export.saveCollection("HSC/calib")
    export.saveCollection("HSC/calib/DM-28636")
    export.saveCollection("HSC/calib/gen2/20180117")
    export.saveCollection("HSC/calib/gen2/20180117/unbounded")
    type : `lsst.daf.butler.DatasetType`
        The type to rename.
    name : `str`
        The new name to adopt.

    Returns
    -------
    new_type : `lsst.daf.butler.DatasetType`
        The new DatasetType.
    """
    return DatasetType(name, type.dimensions, type.storageClass,
                       type.parentStorageClass)


src_repo = Butler(args.src_dir,
                  collections=args.src_collection,
                  writeable=False)
dest_repo = Butler(DEST_DIR, run=DEST_RUN, writeable=True)


def _remove_refcat_run(butler, run):
    """Remove a refcat run and any references from a repository.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The repository from which to remove ``run``.
    run : `str`
        The run to remove, if it exists.
    """
    try:
 def setUp(self):
     # make test collection
     # self.butler = makeTestCollection(self.creatorButler)
     self.collection = self._testMethodName
     self.butler = Butler(butler=self.creatorButler, run=self.collection)
Example #16
 def testMatplotlibFormatter(self):
     butler = Butler(self.root, run="testrun")
     datasetType = DatasetType("test_plot", [],
                               "Plot",
                               universe=butler.registry.dimensions)
     butler.registry.registerDatasetType(datasetType)
     # Does not have to be a random image
     pyplot.imshow([
         self.rng.sample(range(50), 10),
         self.rng.sample(range(50), 10),
         self.rng.sample(range(50), 10),
     ])
     ref = butler.put(pyplot.gcf(), datasetType)
     uri = butler.getURI(ref)
     # The next check will not work unless the URI refers to a local file
     self.assertEqual(uri.scheme, "file", f"Testing returned URI: {uri}")
     with tempfile.NamedTemporaryFile(suffix=".png") as file:
         pyplot.gcf().savefig(file.name)
         self.assertTrue(filecmp.cmp(uri.path, file.name, shallow=True))
     self.assertTrue(butler.datasetExists(ref))
     with self.assertRaises(ValueError):
         butler.get(ref)
     butler.pruneDatasets([ref], unstore=True, purge=True)
     with self.assertRaises(LookupError):
         butler.datasetExists(ref)
        # isn't being transferred.
        return {t.run for t in templates}


def _import(butler, export_file, base_dir):
    """Import the exported files.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        A Butler pointing to the dataset repository.
    export_file : `str`
        A path pointing to a file containing the export results.
    base_dir : `str`
        The base directory for the file locations in ``export_file``.
    """
    butler.import_(directory=base_dir, filename=export_file, transfer="copy")


with tempfile.NamedTemporaryFile(suffix=".yaml") as export_file:
    src = Butler(args.src_dir,
                 collections=args.src_collection,
                 writeable=False)
    runs = _export(src, export_file.name)
    dest = Butler(DATASET_REPO, writeable=True)
    _import(dest, export_file.name, args.src_dir)
    dest.registry.registerCollection(TEMPLATE_COLLECT, CollectionType.CHAINED)
    dest.registry.setCollectionChain(TEMPLATE_COLLECT, runs)

logging.info(f"Templates stored in {DATASET_REPO}:{TEMPLATE_COLLECT}.")
Example #18
class TestCoaddOutputs(unittest.TestCase, MockCheckMixin):
    """Check that coadd outputs are as expected.

    Many tests here are ported from
    https://github.com/lsst/pipe_tasks/blob/
    fd7d5e23d3c71e5d440153bc4faae7de9d5918c5/tests/nopytest_test_coadds.py
    """
    def setUp(self):
        self.butler = Butler(os.path.join(getPackageDir("ci_hsc_gen3"), "DATA"),
                             instrument="HSC", skymap="discrete/ci_hsc",
                             writeable=False, collections=["HSC/runs/ci_hsc"])
        self.skip_mock()
        self._tract = 0
        self._patch = 69
        self._bands = ['r', 'i']

    def test_forced_id_names(self):
        """Test that forced photometry ID fields are named as expected
        (DM-8210).

        Specifically, coadd forced photometry should have only "id" and
        "parent" fields, while CCD forced photometry should have those,
        "objectId", and "parentObjectId".
        """
        coadd_schema = self.butler.get("deepCoadd_forced_src_schema").schema
        self.assertIn("id", coadd_schema)
        self.assertIn("parent", coadd_schema)
        self.assertNotIn("objectId", coadd_schema)
        self.assertNotIn("parentObjectId", coadd_schema)
        ccd_schema = self.butler.get("forced_src_schema").schema
        self.assertIn("id", ccd_schema)
        self.assertIn("parent", ccd_schema)
        self.assertIn("objectId", ccd_schema)
        self.assertIn("parentObjectId", ccd_schema)

    def test_alg_metadata_output(self):
        """Test that the algorithm metadata is persisted correctly
        from MeasureMergedCoaddSourcesTask.
        """
        for band in self._bands:
            cat = self.butler.get(
                "deepCoadd_meas",
                band=band,
                tract=self._tract,
                patch=self._patch
            )
            meta = cat.getMetadata()
            for circ_aperture_flux_radius in meta.getArray('BASE_CIRCULARAPERTUREFLUX_RADII'):
                self.assertIsInstance(circ_aperture_flux_radius, numbers.Number)
            # Each time the run method of a measurement task is executed,
            # algorithm metadata is appended to the algorithm metadata object.
            # Depending on how many times a measurement task is run,
            # a metadata entry may be a single value or multiple values.
            for n_offset in meta.getArray('NOISE_OFFSET'):
                self.assertIsInstance(n_offset, numbers.Number)
            for noise_src in meta.getArray('NOISE_SOURCE'):
                self.assertEqual(noise_src, 'measure')
            for noise_exp_id in meta.getArray('NOISE_EXPOSURE_ID'):
                self.assertIsInstance(noise_exp_id, numbers.Number)
            for noise_seed_mul in meta.getArray('NOISE_SEED_MULTIPLIER'):
                self.assertIsInstance(noise_seed_mul, numbers.Number)

    def test_schema_consistency(self):
        """Test that _schema catalogs are consistent with the data catalogs."""
        det_schema = self.butler.get("deepCoadd_det_schema").schema
        meas_schema = self.butler.get("deepCoadd_meas_schema").schema
        mergeDet_schema = self.butler.get("deepCoadd_mergeDet_schema").schema
        ref_schema = self.butler.get("deepCoadd_ref_schema").schema
        coadd_forced_schema = self.butler.get("deepCoadd_forced_src_schema").schema
        ccd_forced_schema = self.butler.get("forced_src_schema").schema
        for band in self._bands:
            det = self.butler.get("deepCoadd_det", band=band, tract=self._tract, patch=self._patch)
            self.assertEqual(det.schema, det_schema)
            mergeDet = self.butler.get("deepCoadd_mergeDet", band=band, tract=self._tract, patch=self._patch)
            self.assertEqual(mergeDet.schema, mergeDet_schema)
            meas = self.butler.get("deepCoadd_meas", band=band, tract=self._tract, patch=self._patch)
            self.assertEqual(meas.schema, meas_schema)
            ref = self.butler.get("deepCoadd_ref", band=band, tract=self._tract, patch=self._patch)
            self.assertEqual(ref.schema, ref_schema)
            coadd_forced_src = self.butler.get(
                "deepCoadd_forced_src",
                band=band,
                tract=self._tract,
                patch=self._patch
            )
            self.assertEqual(coadd_forced_src.schema, coadd_forced_schema)
        ccd_forced_src = self.butler.get(
            "forced_src",
            tract=self._tract,
            visit=DATA_IDS[0]["visit"],
            detector=DATA_IDS[0]["detector"]
        )
        self.assertEqual(ccd_forced_src.schema, ccd_forced_schema)

    def test_coadd_transmission_curves(self):
        """Test that coadded TransmissionCurves agree with the inputs."""
        wavelengths = np.linspace(4000, 7000, 10)
        n_object_test = 10
        ctx = np.random.RandomState(12345)

        for band in self._bands:
            n_tested = 0
            exp = self.butler.get("deepCoadd_calexp", band=band, tract=self._tract, patch=self._patch)
            cat = self.butler.get("objectTable", band=band, tract=self._tract, patch=self._patch)
            transmission_curve = exp.getInfo().getTransmissionCurve()
            coadd_inputs = exp.getInfo().getCoaddInputs().ccds
            wcs = exp.getWcs()

            to_check = ctx.choice(len(cat), size=n_object_test, replace=False)
            for index in to_check:
                coadd_coord = geom.SpherePoint(cat["coord_ra"].values[index]*geom.degrees,
                                               cat["coord_dec"].values[index]*geom.degrees)
                summed_throughput = np.zeros(wavelengths.shape, dtype=np.float64)
                weight_sum = 0.0
                for rec in coadd_inputs.subsetContaining(coadd_coord, includeValidPolygon=True):
                    det_pos = rec.getWcs().skyToPixel(coadd_coord)
                    det_trans = rec.getTransmissionCurve()
                    weight = rec.get("weight")
                    summed_throughput += det_trans.sampleAt(det_pos, wavelengths)*weight
                    weight_sum += weight
                if weight_sum == 0.0:
                    continue
                summed_throughput /= weight_sum
                coadd_pos = wcs.skyToPixel(coadd_coord)
                coadd_throughput = transmission_curve.sampleAt(coadd_pos, wavelengths)
                np.testing.assert_array_almost_equal(coadd_throughput, summed_throughput)
                n_tested += 1
            self.assertGreater(n_tested, 5)

    def test_mask_planes_exist(self):
        """Test that the input mask planes have been added."""
        for data_id in DATA_IDS:
            mask = self.butler.get("calexp.mask", data_id)
            self.assertIn("CROSSTALK", mask.getMaskPlaneDict())
            self.assertIn("NOT_DEBLENDED", mask.getMaskPlaneDict())

    # Expected to fail until DM-5174 is fixed.
    @unittest.expectedFailure
    def test_masks_removed(self):
        """Test that certain mask planes have been removed from the coadds.

        This is expected to fail until DM-5174 is fixed.
        """
        for band in self._bands:
            mask = self.butler.get("deepCoadd_calexp.mask", band=band, tract=self._tract, patch=self._patch)
            self.assertNotIn("CROSSTALK", mask.getMaskPlaneDict())
            self.assertNotIn("NOT_DEBLENDED", mask.getMaskPlaneDict())

    def test_warp_inputs(self):
        """Test that the warps have the correct inputs."""
        skymap = self.butler.get("skyMap")
        tract_info = skymap[self._tract]
        for warp_type in ["directWarp", "psfMatchedWarp"]:
            datasets = set(self.butler.registry.queryDatasets(f"deepCoadd_{warp_type}"))
            # We only need to test one dataset
            dataset = list(datasets)[0]

            warp = self.butler.getDirect(dataset)
            self.assertEqual(warp.wcs, tract_info.wcs)
            coadd_inputs = warp.getInfo().getCoaddInputs()
            self.assertEqual(len(coadd_inputs.visits), 1)
            visit_record = coadd_inputs.visits[0]
            self.assertEqual(visit_record.getWcs(), warp.wcs)
            self.assertEqual(visit_record.getBBox(), warp.getBBox())
            self.assertGreater(len(coadd_inputs.ccds), 0)

            wcs_cat = self.butler.get(
                "jointcalSkyWcsCatalog",
                visit=visit_record.getId(),
                tract=self._tract
            )
            photocalib_cat = self.butler.get(
                "jointcalPhotoCalibCatalog",
                visit=visit_record.getId(),
                tract=self._tract
            )
            final_psf_cat = self.butler.get(
                "finalized_psf_ap_corr_catalog",
                visit=visit_record.getId()
            )

            # We only need to test one input ccd
            det_record = coadd_inputs.ccds[0]
            exp_bbox = self.butler.get(
                "calexp.bbox",
                visit=det_record["visit"],
                detector=det_record["ccd"]
            )
            self.assertEqual(det_record.getWcs(), wcs_cat.find(det_record["ccd"]).getWcs())
            self.assertEqual(
                det_record.getPhotoCalib(),
                photocalib_cat.find(det_record["ccd"]).getPhotoCalib()
            )
            self.assertEqual(det_record.getBBox(), exp_bbox)
            self.assertIsNotNone(det_record.getTransmissionCurve())
            center = det_record.getBBox().getCenter()
            np.testing.assert_array_almost_equal(
                det_record.getPsf().computeKernelImage(center).array,
                final_psf_cat.find(det_record["ccd"]).getPsf().computeKernelImage(center).array
            )
            input_map = det_record.getApCorrMap()
            final_map = final_psf_cat.find(det_record["ccd"]).getApCorrMap()
            self.assertEqual(len(input_map), len(final_map))
            for key in input_map.keys():
                self.assertEqual(input_map[key], final_map[key])
            self.assertIsNotNone(coadd_inputs.visits.find(det_record["visit"]))

    def test_coadd_inputs(self):
        """Test that the coadds have the correct inputs."""
        skymap = self.butler.get("skyMap")
        tract_info = skymap[self._tract]
        for band in self._bands:
            wcs = self.butler.get("deepCoadd_calexp.wcs", band=band, tract=self._tract, patch=self._patch)
            self.assertEqual(wcs, tract_info.wcs)
            coadd_inputs = self.butler.get(
                "deepCoadd_calexp.coaddInputs",
                band=band,
                tract=self._tract,
                patch=self._patch
            )
            # We only need to test one input ccd
            det_record = coadd_inputs.ccds[0]
            wcs_cat = self.butler.get(
                "jointcalSkyWcsCatalog",
                visit=det_record["visit"],
                tract=self._tract
            )
            photocalib_cat = self.butler.get(
                "jointcalPhotoCalibCatalog",
                visit=det_record["visit"],
                tract=self._tract
            )
            final_psf_cat = self.butler.get(
                "finalized_psf_ap_corr_catalog",
                visit=det_record["visit"]
            )
            exp_bbox = self.butler.get(
                "calexp.bbox",
                visit=det_record["visit"],
                detector=det_record["ccd"]
            )
            self.assertEqual(det_record.getWcs(), wcs_cat.find(det_record["ccd"]).getWcs())
            self.assertEqual(
                det_record.getPhotoCalib(),
                photocalib_cat.find(det_record["ccd"]).getPhotoCalib()
            )
            self.assertEqual(det_record.getBBox(), exp_bbox)
            self.assertIsNotNone(det_record.getTransmissionCurve())
            center = det_record.getBBox().getCenter()
            np.testing.assert_array_almost_equal(
                det_record.getPsf().computeKernelImage(center).array,
                final_psf_cat.find(det_record["ccd"]).getPsf().computeKernelImage(center).array
            )
            input_map = det_record.getApCorrMap()
            final_map = final_psf_cat.find(det_record["ccd"]).getApCorrMap()
            self.assertEqual(len(input_map), len(final_map))
            for key in input_map.keys():
                self.assertEqual(input_map[key], final_map[key])
            self.assertIsNotNone(coadd_inputs.visits.find(det_record["visit"]))

    def test_psf_installation(self):
        """Test that the coadd psf is installed."""
        for band in self._bands:
            wcs = self.butler.get("deepCoadd_calexp.wcs", band=band, tract=self._tract, patch=self._patch)
            coadd_inputs = self.butler.get(
                "deepCoadd_calexp.coaddInputs",
                band=band,
                tract=self._tract,
                patch=self._patch
            )
            coadd_psf = self.butler.get(
                "deepCoadd_calexp.psf",
                band=band,
                tract=self._tract,
                patch=self._patch
            )
            new_psf = lsst.meas.algorithms.CoaddPsf(coadd_inputs.ccds, wcs)
            self.assertEqual(coadd_psf.getComponentCount(), len(coadd_inputs.ccds))
            self.assertEqual(new_psf.getComponentCount(), len(coadd_inputs.ccds))
            for n, record in enumerate(coadd_inputs.ccds):
                center = record.getBBox().getCenter()
                np.testing.assert_array_almost_equal(
                    coadd_psf.getPsf(n).computeKernelImage(center).array,
                    record.getPsf().computeKernelImage(center).array
                )
                np.testing.assert_array_almost_equal(
                    new_psf.getPsf(n).computeKernelImage(center).array,
                    record.getPsf().computeKernelImage(center).array
                )
                self.assertEqual(coadd_psf.getWcs(n), record.getWcs())
                self.assertEqual(new_psf.getWcs(n), record.getWcs())
                self.assertEqual(coadd_psf.getBBox(n), record.getBBox())
                self.assertEqual(new_psf.getBBox(n), record.getBBox())

    def test_coadd_psf(self):
        """Test that the stars on the coadd are well represented by
        the attached PSF.
        """
        n_object_test = 10
        n_good_test = 5
        ctx = np.random.RandomState(12345)

        for band in self._bands:
            exp = self.butler.get("deepCoadd_calexp", band=band, tract=self._tract, patch=self._patch)
            coadd_psf = exp.getPsf()
            cat = self.butler.get("objectTable", band=band, tract=self._tract, patch=self._patch)

            star_cat = cat[(cat["i_extendedness"] < 0.5)
                           & (cat["detect_isPrimary"])
                           & (cat[f"{band}_psfFlux"] > 0.0)
                           & (cat[f"{band}_psfFlux"]/cat[f"{band}_psfFluxErr"] > 50.0)
                           & (cat[f"{band}_psfFlux"]/cat[f"{band}_psfFluxErr"] < 200.0)]

            to_check = ctx.choice(len(star_cat), size=n_object_test, replace=False)
            n_good = 0
            for index in to_check:
                position = geom.Point2D(star_cat["x"].values[index], star_cat["y"].values[index])
                psf_image = coadd_psf.computeImage(position)
                psf_image_bbox = psf_image.getBBox()
                star_image = lsst.afw.image.ImageF(
                    exp.maskedImage.image,
                    psf_image_bbox
                ).convertD()
                star_image /= star_image.array.sum()
                psf_image /= psf_image.array.sum()
                residuals = lsst.afw.image.ImageD(star_image, True)
                residuals -= psf_image
                # This is just a quick check that the coadd psf model works
                # reasonably well for the stars. It is not meant as a detailed
                # test of the psf modeling capability.
                if np.max(np.abs(residuals.array)) < 0.01:
                    n_good += 1

            self.assertGreater(n_good, n_good_test)
Example #19
 def setUp(self):
     self.butler = Butler(REPO_ROOT, run="shared/ci_hsc")
Example #20
    def checkInstrumentWithRegistry(self, cls, testRaw):

        Butler.makeRepo(self.root)
        butler = Butler(self.root, run="tests")
        instrument = cls()
        scFactory = StorageClassFactory()

        # Check instrument class and metadata translator agree on
        # instrument name, using readRawFitsHeader to read the metadata.
        filename = os.path.join(DATAROOT, testRaw)
        md = readRawFitsHeader(filename, translator_class=cls.translatorClass)
        obsInfo = ObservationInfo(md,
                                  translator_class=cls.translatorClass,
                                  filename=filename)
        self.assertEqual(instrument.getName(), obsInfo.instrument)

        # Add Instrument, Detector, and PhysicalFilter entries to the
        # Butler Registry.
        instrument.register(butler.registry)

        # Define a DatasetType for the cameraGeom.Camera, which can be
        # accessed just by identifying its Instrument.
        # A real-world Camera DatasetType should be identified by a
        # validity range as well.
        cameraDatasetType = DatasetType(
            "camera",
            dimensions=["instrument"],
            storageClass=scFactory.getStorageClass("Camera"),
            universe=butler.registry.dimensions)
        butler.registry.registerDatasetType(cameraDatasetType)

        # Define a DatasetType for cameraGeom.Detectors, which can be
        # accessed by identifying its Instrument and (Butler) Detector.
        # A real-world Detector DatasetType probably doesn't need to exist,
        # as  it would just duplicate information in the Camera, and
        # reading a full Camera just to get a single Detector should be
        # plenty efficient.
        detectorDatasetType = DatasetType(
            "detector",
            dimensions=["instrument", "detector"],
            storageClass=scFactory.getStorageClass("Detector"),
            universe=butler.registry.dimensions)
        butler.registry.registerDatasetType(detectorDatasetType)

        # Put and get the Camera.
        dataId = dict(instrument=instrument.instrument)
        butler.put(instrument.getCamera(), "camera", dataId=dataId)
        camera = butler.get("camera", dataId)
        # Full camera comparisons are *slow*; just compare names.
        self.assertEqual(instrument.getCamera().getName(), camera.getName())

        # Put and get a random subset of the Detectors.
        allDetectors = list(instrument.getCamera())
        numDetectors = min(3, len(allDetectors))
        someDetectors = [
            allDetectors[i] for i in self.rng.choice(
                len(allDetectors), size=numDetectors, replace=False)
        ]
        for cameraGeomDetector in someDetectors:
            # Right now we only support integer detector IDs in data IDs;
            # support for detector names and groups (i.e. rafts) is
            # definitely planned but not yet implemented.
            dataId = dict(instrument=instrument.instrument,
                          detector=cameraGeomDetector.getId())
            butler.put(cameraGeomDetector, "detector", dataId=dataId)
            cameraGeomDetector2 = butler.get("detector", dataId=dataId)
            # Full detector comparisons are *slow*; just compare names and
            # serials.
            self.assertEqual(cameraGeomDetector.getName(),
                             cameraGeomDetector2.getName())
            self.assertEqual(cameraGeomDetector.getSerial(),
                             cameraGeomDetector2.getSerial())
Example #21
    def makeGraph(self, pipeline, taskFactory, args):
        """Build a graph from command line arguments.

        Parameters
        ----------
        pipeline : `~lsst.pipe.base.Pipeline`
            Pipeline, can be empty or ``None`` if graph is read from pickle
            file.
        taskFactory : `~lsst.pipe.base.TaskFactory`
            Task factory.
        args : `argparse.Namespace`
            Parsed command line arguments.

        Returns
        -------
        graph : `~lsst.pipe.base.QuantumGraph`
            The quantum graph that was loaded or constructed.
        """
        if args.qgraph:

            with open(args.qgraph, 'rb') as pickleFile:
                qgraph = pickle.load(pickleFile)
                if not isinstance(qgraph, QuantumGraph):
                    raise TypeError(
                        "QuantumGraph pickle file has incorrect object type: {}"
                        .format(type(qgraph)))

            # pipeline cannot be provided in this case
            if pipeline:
                raise ValueError(
                    "Pipeline must not be given when quantum graph is read from file."
                )

        else:

            if not pipeline:
                raise ValueError(
                    "Pipeline must be given for quantum graph construction.")

            # build collection names
            inputs = args.input.copy()
            defaultInputs = inputs.pop("", None)
            outputs = args.output.copy()
            defaultOutputs = outputs.pop("", None)

            # Make butler instance. From this Butler we only need Registry
            # instance. Input/output collections are handled by pre-flight
            # and we don't want to be constrained here by Butler's restrictions
            # on collection names.
            collection = defaultInputs[0] if defaultInputs else None
            butler = Butler(config=args.butler_config, collection=collection)

            # if default input collections are not given on command line then
            # use one from Butler (has to be configured in butler config)
            if not defaultInputs:
                defaultInputs = [butler.collection]
            coll = DatasetOriginInfoDef(defaultInputs=defaultInputs,
                                        defaultOutput=defaultOutputs,
                                        inputOverrides=inputs,
                                        outputOverrides=outputs)

            # make execution plan (a.k.a. DAG) for pipeline
            graphBuilder = GraphBuilder(taskFactory, butler.registry,
                                        args.skip_existing)
            qgraph = graphBuilder.makeGraph(pipeline, coll, args.data_query)

        # count quanta in graph and give a warning if it's empty
        nQuanta = sum(1 for q in qgraph.quanta())
        if nQuanta == 0:
            warnings.warn("QuantumGraph is empty", stacklevel=2)
        else:
            _LOG.info("QuantumGraph contains %d quanta for %d tasks", nQuanta,
                      len(qgraph))

        if args.save_qgraph:
            with open(args.save_qgraph, "wb") as pickleFile:
                pickle.dump(qgraph, pickleFile)

        if args.qgraph_dot:
            graph2dot(qgraph, args.qgraph_dot)

        return qgraph
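
A hypothetical sketch of calling makeGraph() directly; the attribute names on args mirror the ones referenced in the method body above, while the framework, pipeline, and taskFactory objects are assumed to exist.

import argparse

args = argparse.Namespace(
    qgraph=None,                       # build the graph instead of unpickling one
    butler_config="repo/butler.yaml",  # hypothetical repository config
    input={"": ["shared/raw"]},        # default input collections keyed by ""
    output={"": "u/user/output"},      # default output collection keyed by ""
    skip_existing=False,
    data_query="instrument = 'INSTR'",
    save_qgraph="qgraph.pickle",
    qgraph_dot=None,
)
# qgraph = framework.makeGraph(pipeline, taskFactory, args)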
Example #22
    """Collection type names supported by the interface."""
    def _generate_next_value_(name, start, count,
                              last_values) -> str:  # type: ignore
        # Use the name directly as the value
        return name

    RUN = auto()
    CALIBRATION = auto()
    CHAINED = auto()
    TAGGED = auto()


app = FastAPI()
app.add_middleware(GZipMiddleware, minimum_size=1000)

GLOBAL_READONLY_BUTLER = Butler(BUTLER_ROOT, writeable=False)
GLOBAL_READWRITE_BUTLER = Butler(BUTLER_ROOT, writeable=True)


def butler_readonly_dependency() -> Butler:
    return Butler(butler=GLOBAL_READONLY_BUTLER)


def butler_readwrite_dependency() -> Butler:
    return Butler(butler=GLOBAL_READWRITE_BUTLER)


@app.get("/butler/")
def read_root():
    return "Welcome to Excalibur... aka your Butler Server"
Example #23
def makeTestRepo(root, dataIds, *, config=None, **kwargs):
    """Create an empty repository with dummy data IDs.

    Parameters
    ----------
    root : `str`
        The location of the root directory for the repository.
    dataIds : `~collections.abc.Mapping` [`str`, `iterable`]
        A mapping keyed by the dimensions used in the test. Each value
        is an iterable of names for that dimension (e.g., detector IDs for
        `"detector"`). Related dimensions (e.g., instruments and detectors)
        are linked arbitrarily.
    config : `lsst.daf.butler.Config`, optional
        A configuration for the repository (for details, see
        `lsst.daf.butler.Butler.makeRepo`). If omitted, creates a repository
        with default dataset and storage types, but optimized for speed.
        The defaults set ``.datastore.cls``, ``.datastore.checksum`` and
        ``.registry.db``.  If a supplied config does not specify these values
        the internal defaults will be used to ensure that we have a usable
        configuration.
    **kwargs
        Extra arguments to `lsst.daf.butler.Butler.makeRepo`.

    Returns
    -------
    butler : `lsst.daf.butler.Butler`
        A Butler referring to the new repository. This Butler is provided only
        for additional setup; to keep test cases isolated, it is highly
        recommended that each test create its own Butler with a
        unique run/collection. See `makeTestCollection`.

    Notes
    -----
    This function provides a "quick and dirty" repository for simple unit
    tests that don't depend on complex data relationships. Because it assigns
    dimension relationships and other metadata arbitrarily, it is ill-suited
    for tests where the structure of the data matters. If you need such a
    dataset, create it directly or use a saved test dataset.

    Since the values in ``dataIds`` uniquely determine the repository's
    data IDs, the fully linked IDs can be recovered by calling
    `expandUniqueId`, so long as no other code has inserted dimensions into
    the repository registry.
    """
    defaults = Config()
    defaults["datastore", "cls"] = "lsst.daf.butler.datastores.inMemoryDatastore.InMemoryDatastore"
    defaults["datastore", "checksum"] = False  # In case of future changes
    defaults["registry", "db"] = "sqlite:///<butlerRoot>/gen3.sqlite3"

    if config:
        defaults.update(config)

    # Disable config root by default so that our registry override will
    # not be ignored.
    # newConfig guards against location-related keywords like outfile
    newConfig = Butler.makeRepo(root, config=defaults, forceConfigRoot=False, **kwargs)
    butler = Butler(newConfig, writeable=True)
    dimensionRecords = _makeRecords(dataIds, butler.registry.dimensions)
    for dimension, records in dimensionRecords.items():
        butler.registry.insertDimensionData(dimension, *records)
    return butler
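
A hedged sketch of using makeTestRepo() from a test; the dimension names, values, and run name below are placeholders, not part of the function above.

import tempfile

from lsst.daf.butler import Butler

root = tempfile.mkdtemp()
dataIds = {
    "instrument": ["DummyCam"],
    "detector": [0, 1],
    "visit": [42, 43],
}
creatorButler = makeTestRepo(root, dataIds)
# Each test should then work in its own run/collection for isolation.
testButler = Butler(butler=creatorButler, run="test_run_1")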
Example #24
def butler_readonly_dependency() -> Butler:
    return Butler(butler=GLOBAL_READONLY_BUTLER)
Example #25
def makeDiscreteSkyMap(repo,
                       config_file,
                       collections,
                       instrument,
                       skymap_id='discrete',
                       old_skymap_id=None):
    """Implements the command line interface `butler make-discrete-skymap` subcommand,
    should only be called by command line tools and unit test code that tests
    this function.

    Constructs a skymap from calibrated exposure in the butler repository

    Parameters
    ----------
    repo : `str`
        URI to the location to read the repo.
    config_file : `str` or `None`
        Path to a config file that contains overrides to the skymap config.
    collections : `list` [`str`]
        An expression specifying the collections to be searched (in order) when
        reading datasets, and optionally dataset type restrictions on them.
        At least one collection must be specified.  This is the collection
        with the calibrated exposures.
    instrument : `str`
        The name or fully-qualified class name of an instrument.
    skymap_id : `str`, optional
        The identifier of the skymap to save.  Default is 'discrete'.
    old_skymap_id : `str`, optional
        The identifier of the skymap to append to.  Must differ from
        ``skymap_id``.  Ignored unless ``config.doAppend=True``.
    """
    butler = Butler(repo, collections=collections, writeable=True)
    instr = Instrument.from_string(instrument, butler.registry)
    config = MakeDiscreteSkyMapConfig()
    instr.applyConfigOverrides(MakeDiscreteSkyMapTask._DefaultName, config)

    if config_file is not None:
        config.load(config_file)
    # The coaddName for a SkyMap is only relevant in Gen2, and we completely
    # ignore it here; once Gen2 is gone it can be removed.
    oldSkyMap = None
    if config.doAppend:
        if old_skymap_id is None:
            raise ValueError(
                "old_skymap_id must be provided if config.doAppend is True.")
        dataId = {'skymap': old_skymap_id}
        try:
            oldSkyMap = butler.get(BaseSkyMap.SKYMAP_DATASET_TYPE_NAME,
                                   collections=collections,
                                   dataId=dataId)
        except LookupError as e:
            msg = (
                f"Could not find seed skymap with dataId {dataId} "
                f"in collections {collections} but doAppend is {config.doAppend}.  Aborting..."
            )
            raise LookupError(msg, *e.args[1:])

    datasets = butler.registry.queryDatasets('calexp', collections=collections)
    wcs_bbox_tuple_list = [(butler.getDirect(ref.makeComponentRef("wcs")),
                            butler.getDirect(ref.makeComponentRef("bbox")))
                           for ref in datasets]
    task = MakeDiscreteSkyMapTask(config=config)
    result = task.run(wcs_bbox_tuple_list, oldSkyMap)
    result.skyMap.register(skymap_id, butler)
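
A hypothetical invocation of makeDiscreteSkyMap(); the repository path, collection name, and instrument class path are placeholders.

makeDiscreteSkyMap(
    "/path/to/repo",
    config_file=None,
    collections=["HSC/runs/calexps"],
    instrument="lsst.obs.subaru.HyperSuprimeCam",
    skymap_id="discrete",
)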
Example #26
def butler_readwrite_dependency() -> Butler:
    return Butler(butler=GLOBAL_READWRITE_BUTLER)
Example #27
    def runPutGetTest(self, storageClass, datasetTypeName):
        butler = Butler(self.tmpConfigFile)

        # There will not be a collection yet
        collections = butler.registry.getAllCollections()
        self.assertEqual(collections, set())

        # Create and register a DatasetType
        dimensions = butler.registry.dimensions.extract(
            ["instrument", "visit"])

        datasetType = self.addDatasetType(datasetTypeName, dimensions,
                                          storageClass, butler.registry)

        # Add needed Dimensions
        butler.registry.addDimensionEntry("instrument",
                                          {"instrument": "DummyCamComp"})
        butler.registry.addDimensionEntry("physical_filter", {
            "instrument": "DummyCamComp",
            "physical_filter": "d-r"
        })
        butler.registry.addDimensionEntry("visit", {
            "instrument": "DummyCamComp",
            "visit": 423,
            "physical_filter": "d-r"
        })

        # Create and store a dataset
        metric = makeExampleMetrics()
        dataId = {"instrument": "DummyCamComp", "visit": 423}

        # Create a DatasetRef for put
        refIn = DatasetRef(datasetType, dataId, id=None)

        # Put with a preexisting id should fail
        with self.assertRaises(ValueError):
            butler.put(metric, DatasetRef(datasetType, dataId, id=100))

        # Put and remove the dataset once as a DatasetRef, once as a dataId,
        # and once with a DatasetType
        for args in ((refIn, ), (datasetTypeName, dataId), (datasetType,
                                                            dataId)):
            with self.subTest(args=args):
                ref = butler.put(metric, *args)
                self.assertIsInstance(ref, DatasetRef)

                # Test getDirect
                metricOut = butler.getDirect(ref)
                self.assertEqual(metric, metricOut)
                # Test get
                metricOut = butler.get(ref.datasetType.name, dataId)
                self.assertEqual(metric, metricOut)
                # Test get with a datasetRef
                metricOut = butler.get(ref)
                self.assertEqual(metric, metricOut)

                # Check we can get components
                if storageClass.isComposite():
                    self.assertGetComponents(butler, ref,
                                             ("summary", "data", "output"),
                                             metric)

                # Remove from collection only; after that we shouldn't be able
                # to find it unless we use the dataset_id.
                butler.remove(*args, delete=False)
                with self.assertRaises(LookupError):
                    butler.datasetExists(*args)
                # If we use the output ref with the dataset_id, we should
                # still be able to load it with getDirect().
                self.assertEqual(metric, butler.getDirect(ref))

                # Reinsert into collection, then delete from Datastore *and*
                # remove from collection.
                butler.registry.associate(butler.collection, [ref])
                butler.remove(*args)
                # Lookup with original args should still fail.
                with self.assertRaises(LookupError):
                    butler.datasetExists(*args)
                # Now getDirect() should fail, too.
                with self.assertRaises(FileNotFoundError):
                    butler.getDirect(ref)
                # Registry still knows about it, if we use the dataset_id.
                self.assertEqual(butler.registry.getDataset(ref.id), ref)

                # Put again, then remove completely (this generates a new
                # dataset record in registry, with a new ID - the old one
                # still exists but it is not in any collection so we don't
                # care).
                ref = butler.put(metric, *args)
                butler.remove(*args, remember=False)
                # Lookup with original args should still fail.
                with self.assertRaises(LookupError):
                    butler.datasetExists(*args)
                # getDirect() should still fail.
                with self.assertRaises(FileNotFoundError):
                    butler.getDirect(ref)
                # Registry shouldn't be able to find it by dataset_id anymore.
                self.assertIsNone(butler.registry.getDataset(ref.id))

        # Put the dataset again, since the last thing we did was remove it.
        ref = butler.put(metric, refIn)

        # Get with parameters
        stop = 4
        sliced = butler.get(ref, parameters={"slice": slice(stop)})
        self.assertNotEqual(metric, sliced)
        self.assertEqual(metric.summary, sliced.summary)
        self.assertEqual(metric.output, sliced.output)
        self.assertEqual(metric.data[:stop], sliced.data)

        # Combining a DatasetRef with a dataId should fail
        with self.assertRaises(ValueError):
            butler.get(ref, dataId)
        # Getting with an explicit ref should fail if the id doesn't match
        with self.assertRaises(ValueError):
            butler.get(DatasetRef(ref.datasetType, ref.dataId, id=101))

        # Getting a dataset with unknown parameters should fail
        with self.assertRaises(KeyError):
            butler.get(ref, parameters={"unsupported": True})

        # Check we have a collection
        collections = butler.registry.getAllCollections()
        self.assertEqual(collections, {
            "ingest",
        })
Example #28
class FormattersTests(DatasetTestHelper, lsst.utils.tests.TestCase):
    root = None
    storageClassFactory = None

    @classmethod
    def setUpClass(cls):
        """Create a new butler once only."""

        cls.storageClassFactory = StorageClassFactory()

        cls.root = tempfile.mkdtemp(dir=TESTDIR)

        data_ids = {
            "instrument": [INSTRUMENT_NAME],
            "detector": [0, 1, 2, 3, 4, 5],
            "exposure": [11, 22],
        }

        configURI = ButlerURI("resource://spherex/configs",
                              forceDirectory=True)
        butlerConfig = Config(configURI.join("butler.yaml"))
        # in-memory db is being phased out
        # butlerConfig["registry", "db"] = 'sqlite:///:memory:'
        cls.creatorButler = makeTestRepo(
            cls.root,
            data_ids,
            config=butlerConfig,
            dimensionConfig=configURI.join("dimensions.yaml"))
        for formatter in FORMATTERS:
            datasetTypeName, storageClassName = (formatter["dataset_type"],
                                                 formatter["storage_class"])
            storageClass = cls.storageClassFactory.getStorageClass(
                storageClassName)
            addDatasetType(cls.creatorButler, datasetTypeName, set(data_ids),
                           storageClass)

    @classmethod
    def tearDownClass(cls):
        if cls.root is not None:
            shutil.rmtree(cls.root, ignore_errors=True)

    def setUp(self):
        # make test collection
        # self.butler = makeTestCollection(self.creatorButler)
        self.collection = self._testMethodName
        self.butler = Butler(butler=self.creatorButler, run=self.collection)

    def test_putget(self):
        fitsPath = os.path.join(TESTDIR, "data", "small.fits")
        dataid = {"exposure": 11, "detector": 0, "instrument": INSTRUMENT_NAME}
        for formatter in FORMATTERS:
            # in-memory object, representing fits
            inmemobj = formatter["reader"](fitsPath)

            # save in-memory object into butler dataset
            datasetTypeName = formatter["dataset_type"]
            self.butler.put(inmemobj, datasetTypeName, dataid)

            # get butler dataset
            retrievedobj = self.butler.get(datasetTypeName, dataid)
            self.assertTrue(isinstance(retrievedobj, formatter["inmem_cls"]))
            self.assertEqual(retrievedobj.__class__.__name__,
                             inmemobj.__class__.__name__)

    def test_ingest(self):

        fitsPath = os.path.join(TESTDIR, "data", "small.fits")

        formatter = FORMATTERS[0]
        datasetTypeName, formatterCls = (formatter["dataset_type"],
                                         formatter["formatter_cls"])

        datasetType = self.butler.registry.getDatasetType(datasetTypeName)
        datasets = []
        for exposure in range(3, 5):
            for detector in range(6):
                # use the same fits to test ingest
                if not os.path.exists(fitsPath):
                    log.warning(
                        f"No data found for detector {detector}, exposure {exposure} @ {fitsPath}."
                    )
                    continue
                ref = DatasetRef(datasetType,
                                 dataId={
                                     "instrument": INSTRUMENT_NAME,
                                     "detector": detector,
                                     "exposure": exposure * 11
                                 })
                datasets.append(
                    FileDataset(refs=ref,
                                path=fitsPath,
                                formatter=formatterCls))

        # register new collection
        # run = "rawIngestedRun"
        # self.butler.registry.registerCollection(run, type=CollectionType.RUN)

        # collection is registered as a part of setUp
        run = self.collection

        with self.butler.transaction():
            for exposure in range(3, 5):
                expid = exposure * 11
                self.butler.registry.insertDimensionData(
                    "exposure", {
                        "instrument": INSTRUMENT_NAME,
                        "id": expid,
                        "name": f"{expid}",
                        "group_name": "day1",
                        "timespan": Timespan(begin=None, end=None)
                    })
            # transfer can be 'auto', 'move', 'copy', 'hardlink', 'relsymlink'
            # or 'symlink'
            self.butler.ingest(*datasets, transfer="symlink", run=run)

        # verify that 12 files were ingested (2 exposures for each detector)
        refsSet = set(
            self.butler.registry.queryDatasets(datasetTypeName,
                                               collections=[run]))
        self.assertEqual(
            len(refsSet), 12,
            f"Collection {run} should have 12 elements after ingest")

        # verify that data id is present
        dataid = {"exposure": 44, "detector": 5, "instrument": INSTRUMENT_NAME}
        refsList = list(
            self.butler.registry.queryDatasets(datasetTypeName,
                                               collections=[run],
                                               dataId=dataid))
        self.assertEqual(
            len(refsList), 1,
            f"Collection {run} should have 1 element with {dataid}")
Example #29
    def testPutTemplates(self):
        storageClass = self.storageClassFactory.getStorageClass(
            "StructuredDataNoComponents")
        butler = Butler(self.tmpConfigFile)

        # Add needed Dimensions
        butler.registry.addDimensionEntry("instrument",
                                          {"instrument": "DummyCamComp"})
        butler.registry.addDimensionEntry("physical_filter", {
            "instrument": "DummyCamComp",
            "physical_filter": "d-r"
        })
        butler.registry.addDimensionEntry("visit", {
            "instrument": "DummyCamComp",
            "visit": 423,
            "physical_filter": "d-r"
        })
        butler.registry.addDimensionEntry("visit", {
            "instrument": "DummyCamComp",
            "visit": 425,
            "physical_filter": "d-r"
        })

        # Create and store a dataset
        metric = makeExampleMetrics()

        # Create two almost-identical DatasetTypes (both will use default
        # template)
        dimensions = butler.registry.dimensions.extract(
            ["instrument", "visit"])
        butler.registry.registerDatasetType(
            DatasetType("metric1", dimensions, storageClass))
        butler.registry.registerDatasetType(
            DatasetType("metric2", dimensions, storageClass))
        butler.registry.registerDatasetType(
            DatasetType("metric3", dimensions, storageClass))

        dataId1 = {"instrument": "DummyCamComp", "visit": 423}
        dataId2 = {
            "instrument": "DummyCamComp",
            "visit": 423,
            "physical_filter": "d-r"
        }
        dataId3 = {"instrument": "DummyCamComp", "visit": 425}

        # Put with exactly the data ID keys needed
        ref = butler.put(metric, "metric1", dataId1)
        self.assertTrue(
            self.checkFileExists(butler.datastore.root,
                                 "ingest/metric1/DummyCamComp_423.pickle"))

        # Check the template based on dimensions
        butler.datastore.templates.validateTemplates([ref])

        # Put with extra data ID keys (physical_filter is an optional
        # dependency); should not change template (at least the way we're
        # defining them  to behave now; the important thing is that they
        # must be consistent).
        ref = butler.put(metric, "metric2", dataId2)
        self.assertTrue(
            self.checkFileExists(butler.datastore.root,
                                 "ingest/metric2/DummyCamComp_423.pickle"))

        # Check the template based on dimensions
        butler.datastore.templates.validateTemplates([ref])

        # Now use a file template that will not result in unique filenames
        ref = butler.put(metric, "metric3", dataId1)

        # Check the template based on dimensions. This one is a bad template
        with self.assertRaises(FileTemplateValidationError):
            butler.datastore.templates.validateTemplates([ref])

        with self.assertRaises(FileExistsError):
            butler.put(metric, "metric3", dataId3)
Example #30
    def testRegistryDefaults(self):
        """Test that we can default the collections and some data ID keys when
        constructing a butler.

        Many tests that use default run already exist in ``test_butler.py``, so
        that isn't tested here.  And while most of this functionality is
        implemented in `Registry`, we test it here instead of
        ``daf/butler/tests/registry.py`` because it shouldn't depend on the
        database backend at all.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(
            filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry",
                                             "datasets.yaml"))
        # Need to actually set defaults later, not at construction, because
        # we need to import the instrument before we can use it as a default.
        # Don't set a default instrument value for data IDs, because 'Cam1'
        # should be inferred by virtue of that being the only value in the
        # input collections.
        butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
        # Use findDataset without collections or instrument.
        ref = butler.registry.findDataset("flat",
                                          detector=2,
                                          physical_filter="Cam1-G")
        # Do the same with Butler.get; this should ultimately invoke a lot of
        # the same code, so it's a bit circular, but mostly we're checking that
        # it works at all.
        dataset_id, _ = butler.get("flat",
                                   detector=2,
                                   physical_filter="Cam1-G")
        self.assertEqual(ref.id, dataset_id)
        # Query for datasets.  Test defaulting the data ID in both kwargs and
        # in the WHERE expression.
        queried_refs_1 = set(
            butler.registry.queryDatasets("flat",
                                          detector=2,
                                          physical_filter="Cam1-G"))
        self.assertEqual({ref}, queried_refs_1)
        queried_refs_2 = set(
            butler.registry.queryDatasets(
                "flat", where="detector=2 AND physical_filter='Cam1-G'"))
        self.assertEqual({ref}, queried_refs_2)
        # Query for data IDs with a dataset constraint.
        queried_data_ids = set(
            butler.registry.queryDataIds(
                {"instrument", "detector", "physical_filter"},
                datasets={"flat"},
                detector=2,
                physical_filter="Cam1-G"))
        self.assertEqual({ref.dataId}, queried_data_ids)
        # Add another instrument to the repo, and a dataset that uses it to
        # the `imported_g` collection.
        butler.registry.insertDimensionData("instrument", {"name": "Cam2"})
        camera = DatasetType(
            "camera",
            dimensions=butler.registry.dimensions["instrument"].graph,
            storageClass="Camera",
        )
        butler.registry.registerDatasetType(camera)
        butler.registry.insertDatasets(camera, [{
            "instrument": "Cam2"
        }],
                                       run="imported_g")
        # Initialize a new butler with `imported_g` as its default run.
        # This should not have a default instrument, because there are two.
        # Pass run instead of collections; this should set both.
        butler2 = Butler(butler=butler, run="imported_g")
        self.assertEqual(list(butler2.registry.defaults.collections),
                         ["imported_g"])
        self.assertEqual(butler2.registry.defaults.run, "imported_g")
        self.assertFalse(butler2.registry.defaults.dataId)
        # Initialize a new butler with an instrument default explicitly given.
        # Set collections instead of run, which should then be None.
        butler3 = Butler(butler=butler,
                         collections=["imported_g"],
                         instrument="Cam2")
        self.assertEqual(list(butler3.registry.defaults.collections),
                         ["imported_g"])
        self.assertIsNone(butler3.registry.defaults.run)
        self.assertEqual(butler3.registry.defaults.dataId.byName(),
                         {"instrument": "Cam2"})