Example #1
 def makeRegistry(self) -> Registry:
     prefix = f"test_{secrets.token_hex(8).lower()}_"
     self._prefixes.append(prefix)
     config = self.makeRegistryConfig()
     # Can't use Registry.fromConfig for these tests because we don't want
     # to reconnect to the server every single time.  But we at least use
     # OracleDatabase.fromConnection rather than the constructor so
     # we can try to pass a prefix through via "+" in a namespace.
     database = OracleDatabase.fromConnection(connection=self._connection,
                                              origin=0,
                                              namespace=f"+{prefix}")
     attributes = doImport(config["managers", "attributes"])
     opaque = doImport(config["managers", "opaque"])
     dimensions = doImport(config["managers", "dimensions"])
     collections = doImport(config["managers", "collections"])
     datasets = doImport(config["managers", "datasets"])
     datastoreBridges = doImport(config["managers", "datastores"])
     return Registry(database=database,
                     attributes=attributes,
                     opaque=opaque,
                     dimensions=dimensions,
                     collections=collections,
                     datasets=datasets,
                     datastoreBridges=datastoreBridges,
                     universe=DimensionUniverse(config),
                     create=True)
Example #2
    def setUp(self):
        self.id = 0

        # Create DatasetRefs to test against constraints model
        self.universe = DimensionUniverse()
        dimensions = self.universe.extract(
            ("visit", "physical_filter", "instrument"))
        sc = StorageClass("DummySC", dict, None)
        self.calexpA = self.makeDatasetRef("calexp",
                                           dimensions,
                                           sc, {
                                               "instrument": "A",
                                               "physical_filter": "u"
                                           },
                                           conform=False)

        dimensions = self.universe.extract(("visit", "detector", "instrument"))
        self.pviA = self.makeDatasetRef("pvi",
                                        dimensions,
                                        sc, {
                                            "instrument": "A",
                                            "visit": 1
                                        },
                                        conform=False)
        self.pviB = self.makeDatasetRef("pvi",
                                        dimensions,
                                        sc, {
                                            "instrument": "B",
                                            "visit": 2
                                        },
                                        conform=False)
Example #3
    def testRegistryWithStorageClass(self):
        """Test that the registry can be given a StorageClass object.
        """
        formatterTypeName = "lsst.daf.butler.formatters.yaml.YamlFormatter"
        storageClassName = "TestClass"
        sc = StorageClass(storageClassName, dict, None)

        universe = DimensionUniverse()
        datasetType = DatasetType("calexp", universe.empty, sc)

        # Store using an instance
        self.factory.registerFormatter(sc, formatterTypeName)

        # Retrieve using the class
        f = self.factory.getFormatter(sc, self.fileDescriptor)
        self.assertIsFormatter(f)
        self.assertEqual(f.fileDescriptor, self.fileDescriptor)

        # Retrieve using the DatasetType
        f2 = self.factory.getFormatter(datasetType, self.fileDescriptor)
        self.assertIsFormatter(f2)
        self.assertEqual(f.name(), f2.name())

        # Class directly
        f2cls = self.factory.getFormatterClass(datasetType)
        self.assertIsFormatter(f2cls)

        # This import might be deferred, though pytest may have already loaded it
        from lsst.daf.butler.formatters.yaml import YamlFormatter
        self.assertEqual(type(f), YamlFormatter)

        with self.assertRaises(KeyError):
            # Attempt to overwrite using a different value
            self.factory.registerFormatter(storageClassName,
                                           "lsst.daf.butler.formatters.json.JsonFormatter")
Example #4
 def setUp(self):
     self.universe = DimensionUniverse()
     self.dataId = {
         "instrument": "dummy",
         "visit": 52,
         "physical_filter": "U"
     }
Example #5
    def testMap(self):
        universe = DimensionUniverse()
        c = CompositesMap(self.configFile, universe=universe)

        # Check that a str is not supported
        with self.assertRaises(ValueError):
            c.shouldBeDisassembled("fred")

        # These will fail (not a composite)
        sc = StorageClass("StructuredDataJson")
        d = DatasetType("dummyTrue", universe.empty, sc)
        self.assertFalse(sc.isComposite())
        self.assertFalse(d.isComposite())
        self.assertFalse(c.shouldBeDisassembled(d),
                         f"Test with DatasetType: {d}")
        self.assertFalse(c.shouldBeDisassembled(sc),
                         f"Test with StorageClass: {sc}")

        # Repeat but this time use a composite storage class
        sccomp = StorageClass("Dummy")
        sc = StorageClass("StructuredDataJson",
                          components={
                              "dummy": sccomp,
                              "dummy2": sccomp
                          })
        d = DatasetType("dummyTrue", universe.empty, sc)
        self.assertTrue(sc.isComposite())
        self.assertTrue(d.isComposite())
        self.assertTrue(c.shouldBeDisassembled(d),
                        f"Test with DatasetType: {d}")
        self.assertFalse(c.shouldBeDisassembled(sc),
                         f"Test with StorageClass: {sc}")

        # Override with False
        d = DatasetType("dummyFalse", universe.empty, sc)
        self.assertFalse(c.shouldBeDisassembled(d),
                         f"Test with DatasetType: {d}")

        # DatasetType that has no explicit entry
        d = DatasetType("dummyFred", universe.empty, sc)
        self.assertFalse(c.shouldBeDisassembled(d),
                         f"Test with DatasetType: {d}")

        # StorageClass that will be disassembled
        sc = StorageClass("StructuredComposite",
                          components={
                              "dummy": sccomp,
                              "dummy2": sccomp
                          })
        d = DatasetType("dummyFred", universe.empty, sc)
        self.assertTrue(c.shouldBeDisassembled(d),
                        f"Test with DatasetType: {d}")

        # Check that we are not allowed a single component in a composite
        with self.assertRaises(ValueError):
            StorageClass("TestSC", components={"dummy": sccomp})
Example #6
 def setUp(self):
     self.universe = DimensionUniverse()
     datasetTypeName = "test"
     self.componentStorageClass1 = StorageClass("Component1")
     self.componentStorageClass2 = StorageClass("Component2")
     self.parentStorageClass = StorageClass("Parent", components={"a": self.componentStorageClass1,
                                                                  "b": self.componentStorageClass2})
     dimensions = self.universe.extract(("instrument", "visit"))
     self.dataId = dict(instrument="DummyCam", visit=42)
     self.datasetType = DatasetType(datasetTypeName, dimensions, self.parentStorageClass)
Example #7
    def setUp(self):
        self.id = 0
        self.factory = FormatterFactory()
        self.universe = DimensionUniverse()
        self.dataId = DataCoordinate.makeEmpty(self.universe)

        # Dummy FileDescriptor for testing getFormatter
        self.fileDescriptor = FileDescriptor(
            Location("/a/b/c", "d"),
            StorageClass("DummyStorageClass", dict, None))
Example #8
    def setUpClass(cls):
        # Storage Classes are fixed for all datastores in these tests
        scConfigFile = os.path.join(TESTDIR, "config/basic/storageClasses.yaml")
        cls.storageClassFactory = StorageClassFactory()
        cls.storageClassFactory.addFromConfig(scConfigFile)

        # Read the Datastore config so we can get the class
        # information (since we should not assume the constructor
        # name here, but rely on the configuration file itself)
        datastoreConfig = DatastoreConfig(cls.configFile)
        cls.datastoreType = doImport(datastoreConfig["cls"])
        cls.universe = DimensionUniverse()
Example #9
 def setUp(self):
     self.universe = DimensionUniverse()
     self.fixed = ExpandedDataCoordinate(
         DimensionGraph(universe=self.universe, names=["skymap"]),
         values=("unimportant", ),
         records={
             "skymap":
             self.universe["skymap"].RecordClass.fromDict({
                 "name": "unimportant",
                 "tract_max": 5,
                 "patch_nx_max": 3,
                 "patch_ny_max": 3,
             })
         })
Example #10
 def makeRegistry(self) -> Registry:
     prefix = f"test_{secrets.token_hex(8).lower()}_"
     self._prefixes.append(prefix)
     config = RegistryConfig()
     # Can't use Registry.fromConfig for these tests because we don't want
     # to reconnect to the server every single time.  But we at least use
     # OracleDatabase.fromConnection rather than the constructor so
     # we can try to pass a prefix through via "+" in a namespace.
     database = OracleDatabase.fromConnection(connection=self._connection,
                                              origin=0,
                                              namespace=f"+{prefix}")
     return Registry(database=database,
                     dimensions=DimensionUniverse(config),
                     create=True)
Example #11
 def setUp(self):
     self.universe = DimensionUniverse()
     self.fixed = DataCoordinate.fromFullValues(
         DimensionGraph(universe=self.universe, names=["skymap"]),
         values=("unimportant", ),
     ).expanded(
         records={
             "skymap":
             self.universe["skymap"].RecordClass(
                 name="unimportant",
                 tract_max=5,
                 patch_nx_max=3,
                 patch_ny_max=3,
             )
         })
Example #12
 def testPickling(self):
     # Pickling and copying should always yield the exact same object within
     # a single process (cross-process is impossible to test here).
     universe1 = DimensionUniverse()
     universe2 = pickle.loads(pickle.dumps(universe1))
     universe3 = copy.copy(universe1)
     universe4 = copy.deepcopy(universe1)
     self.assertIs(universe1, universe2)
     self.assertIs(universe1, universe3)
     self.assertIs(universe1, universe4)
     for element1 in universe1.elements:
         element2 = pickle.loads(pickle.dumps(element1))
         self.assertIs(element1, element2)
         graph1 = element1.graph
         graph2 = pickle.loads(pickle.dumps(graph1))
         self.assertIs(graph1, graph2)
Example #13
    def _makeQuanta(self, config):
        """Create set of Quanta"""
        universe = DimensionUniverse()
        connections = config.connections.ConnectionsClass(config=config)

        dstype0 = connections.input.makeDatasetType(universe)
        dstype1 = connections.output.makeDatasetType(universe)

        quanta = []
        for visit in range(100):
            inputRef = self._makeDSRefVisit(dstype0, visit, universe)
            outputRef = self._makeDSRefVisit(dstype1, visit, universe)
            quantum = Quantum(
                inputs={inputRef.datasetType: [inputRef]}, outputs={outputRef.datasetType: [outputRef]}
            )
            quanta.append(quantum)

        return quanta
Example #14
    def _makeQuanta(self, config):
        """Create set of Quanta
        """
        universe = DimensionUniverse()
        run = Run(collection=1, environment=None, pipeline=None)
        connections = config.connections.ConnectionsClass(config=config)

        dstype0 = connections.input.makeDatasetType(universe)
        dstype1 = connections.output.makeDatasetType(universe)

        quanta = []
        for visit in range(100):
            quantum = Quantum(run=run)
            quantum.addPredictedInput(
                self._makeDSRefVisit(dstype0, visit, universe))
            quantum.addOutput(self._makeDSRefVisit(dstype1, visit, universe))
            quanta.append(quantum)

        return quanta
Example #15
    def testAddInputsOutputs(self):
        """Test of addPredictedInput() method.
        """
        quantum = Quantum(taskName="some.task.object", run=None)

        # start with empty
        self.assertEqual(quantum.predictedInputs, dict())
        universe = DimensionUniverse()
        instrument = "DummyCam"
        datasetTypeName = "test_ds"
        storageClass = StorageClass("testref_StructuredData")
        datasetType = DatasetType(datasetTypeName,
                                  universe.extract(("instrument", "visit")),
                                  storageClass)

        # add one ref
        ref = DatasetRef(datasetType, dict(instrument=instrument, visit=42))
        quantum.addPredictedInput(ref)
        self.assertIn(datasetTypeName, quantum.predictedInputs)
        self.assertEqual(len(quantum.predictedInputs[datasetTypeName]), 1)
        # add second ref
        ref = DatasetRef(datasetType, dict(instrument=instrument, visit=43))
        quantum.addPredictedInput(ref)
        self.assertEqual(len(quantum.predictedInputs[datasetTypeName]), 2)

        # mark last ref as actually used
        self.assertEqual(quantum.actualInputs, dict())
        quantum._markInputUsed(ref)
        self.assertIn(datasetTypeName, quantum.actualInputs)
        self.assertEqual(len(quantum.actualInputs[datasetTypeName]), 1)

        # add couple of outputs too
        self.assertEqual(quantum.outputs, dict())
        ref = DatasetRef(datasetType, dict(instrument=instrument, visit=42))
        quantum.addOutput(ref)
        self.assertIn(datasetTypeName, quantum.outputs)
        self.assertEqual(len(quantum.outputs[datasetTypeName]), 1)

        ref = DatasetRef(datasetType, dict(instrument=instrument, visit=43))
        quantum.addOutput(ref)
        self.assertEqual(len(quantum.outputs[datasetTypeName]), 2)
Example #16
    def testConstructor(self):
        """Test of constructor.
        """
        # Quantum specific arguments
        taskName = "some.task.object"  # can't use a real PipelineTask due to inverted package dependency

        quantum = Quantum(taskName=taskName)
        self.assertEqual(quantum.taskName, taskName)
        self.assertEqual(quantum.initInputs, {})
        self.assertEqual(quantum.inputs, NamedKeyDict())
        self.assertEqual(quantum.outputs, {})
        self.assertIsNone(quantum.dataId)

        universe = DimensionUniverse()
        instrument = "DummyCam"
        datasetTypeName = "test_ds"
        storageClass = StorageClass("testref_StructuredData")
        datasetType = DatasetType(datasetTypeName,
                                  universe.extract(("instrument", "visit")),
                                  storageClass)
        predictedInputs = {
            datasetType: [
                DatasetRef(datasetType, dict(instrument=instrument, visit=42)),
                DatasetRef(datasetType, dict(instrument=instrument, visit=43))
            ]
        }
        outputs = {
            datasetType: [
                DatasetRef(datasetType, dict(instrument=instrument, visit=42)),
                DatasetRef(datasetType, dict(instrument=instrument, visit=43))
            ]
        }

        quantum = Quantum(taskName=taskName,
                          inputs=predictedInputs,
                          outputs=outputs)
        self.assertEqual(len(quantum.inputs[datasetType]), 2)
        self.assertEqual(len(quantum.outputs[datasetType]), 2)
Example #17
def pipeline2dot(pipeline, file):
    """Convert Pipeline into GraphViz digraph.

    This method is mostly for documentation/presentation purposes.
    Unlike other methods this method does not validate graph consistency.

    Parameters
    ----------
    pipeline : `pipe.base.Pipeline`
        Pipeline description.
    file : str or file object
        File where the GraphViz graph (DOT language) is written; can be a file
        name or a file object.

    Raises
    ------
    OSError
        Raised if the output file cannot be opened.
    ImportError
        Raised if the task class cannot be imported.
    MissingTaskFactoryError
        Raised if a `TaskFactory` is needed but not provided.
    """
    universe = DimensionUniverse()

    def expand_dimensions(dimensions):
        """Returns expanded list of dimensions, with special skypix treatment.

        Parameters
        ----------
        dimensions : `list` [`str`]

        Returns
        -------
        dimensions : `list` [`str`]
        """
        dimensions = set(dimensions)
        skypix_dim = []
        if "skypix" in dimensions:
            dimensions.remove("skypix")
            skypix_dim = ["skypix"]
        dimensions = universe.extract(dimensions)
        return list(dimensions.names) + skypix_dim

    # open a file if needed
    close = False
    if not hasattr(file, "write"):
        file = open(file, "w")
        close = True

    print("digraph Pipeline {", file=file)

    allDatasets = set()
    if isinstance(pipeline, Pipeline):
        pipeline = pipeline.toExpandedPipeline()
    for idx, taskDef in enumerate(pipeline):

        # node for a task
        taskNodeName = "task{}".format(idx)
        _renderTaskNode(taskNodeName, taskDef, file, idx)

        for attr in iterConnections(taskDef.connections, 'inputs'):
            if attr.name not in allDatasets:
                dimensions = expand_dimensions(attr.dimensions)
                _renderDSTypeNode(attr.name, dimensions, file)
                allDatasets.add(attr.name)
            _renderEdge(attr.name, taskNodeName, file)

        for attr in iterConnections(taskDef.connections, 'prerequisiteInputs'):
            if attr.name not in allDatasets:
                dimensions = expand_dimensions(attr.dimensions)
                _renderDSTypeNode(attr.name, dimensions, file)
                allDatasets.add(attr.name)
            # use dashed line for prerequisite edges to distinguish them
            _renderEdge(attr.name, taskNodeName, file, style="dashed")

        for attr in iterConnections(taskDef.connections, 'outputs'):
            if attr.name not in allDatasets:
                dimensions = expand_dimensions(attr.dimensions)
                _renderDSTypeNode(attr.name, dimensions, file)
                allDatasets.add(attr.name)
            _renderEdge(taskNodeName, attr.name, file)

    print("}", file=file)
    if close:
        file.close()
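
A minimal usage sketch for the function above; the pipeline YAML path is hypothetical, and loading via Pipeline.fromFile is an assumption about the lsst.pipe.base version in use:

from lsst.pipe.base import Pipeline

# Load a pipeline definition and render it to a GraphViz DOT file
# using the pipeline2dot function shown in Example #17.
pipeline = Pipeline.fromFile("my_pipeline.yaml")  # hypothetical pipeline file
with open("pipeline.dot", "w") as dot_file:
    pipeline2dot(pipeline, dot_file)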
Example #18
    def pack_data_id(self, tract, patch, band=None):
        """Pack a skymap-based data ID into an integer.

        Parameters
        ----------
        tract : `int`
            Integer ID for the tract.
        patch : `tuple` (`int`) or `int`
            Either a 2-element (x, y) tuple (Gen2 patch ID) or a single integer
            (Gen3 patch ID, corresponding to the "sequential" patch index
            methods in this package).
        band : `str`, optional
            If provided, a filter name present in
            `SkyMapDimensionPacker.SUPPORTED_FILTERS` (which is aspirationally
            a list of all Gen3 'bands', but in practice may be missing some;
            see RFC-785).  If not provided, the packing algorithm that does
            not include the filter will be used.

        Returns
        -------
        packed : `int`
            Integer that corresponds to the data ID.
        max_bits : `int`
            Maximum number of bits that ``packed`` could have, assuming this
            skymap and presence or absence of ``band``.

        Notes
        -----
        This method uses a Gen3 `lsst.daf.butler.DimensionPacker` object under
        the hood to guarantee consistency with pure Gen3 code, but it does not
        require the caller to actually have a Gen3 butler available.  It does,
        however, require a filter value compatible with the Gen3 "band"
        dimension.

        This is a temporary interface intended to aid with the migration from
        Gen2 to Gen3 middleware.  It will be removed with the Gen2 middleware
        or when DM-31924 provides a longer-term replacement, whichever comes
        first.  Pure Gen3 code should use `lsst.daf.butler.DataCoordinate.pack`
        or other `lsst.daf.butler.DimensionPacker` interfaces.
        """
        from lsst.daf.butler import DataCoordinate, DimensionUniverse
        universe = DimensionUniverse()
        dummy_skymap_name = "unimportant"  # only matters to Gen3 registry
        tract_info = self[tract]
        patch_info = tract_info[patch]
        nx, ny = tract_info.getNumPatches()
        skymap_record = universe["skymap"].RecordClass(
            name=dummy_skymap_name,
            hash=self.getSha1(),
            tract_max=len(self),
            patch_nx_max=nx,  # assuming these are the same for all tracts for now
            patch_ny_max=ny,
        )
        skymap_data_id = DataCoordinate.standardize(
            skymap=dummy_skymap_name,
            universe=universe,
        ).expanded(records={"skymap": skymap_record})
        full_data_id = DataCoordinate.standardize(
            skymap=dummy_skymap_name,
            tract=tract_info.getId(),
            patch=tract_info.getSequentialPatchIndex(patch_info),
            universe=universe,
        )
        if band is None:
            packer = universe.makePacker("tract_patch", skymap_data_id)
        else:
            packer = universe.makePacker("tract_patch_band", skymap_data_id)
            full_data_id = DataCoordinate.standardize(full_data_id, band=band)
        return packer.pack(full_data_id, returnMaxBits=True)
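
A minimal usage sketch for the method above, assuming `skymap` is an already-constructed skymap instance from this package; the tract, patch, and band values are hypothetical:

skymap = ...  # an existing skymap instance providing pack_data_id (assumption)
packed, max_bits = skymap.pack_data_id(tract=0, patch=(1, 2), band="r")
print(f"data ID packs to {packed} in at most {max_bits} bits")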
Example #19
 def __init__(self):
     self.datasets = {}
     self.registry = SimpleNamespace(dimensions=DimensionUniverse())
Example #20
#!/usr/bin/env python

from lsst.pipe.base import QuantumGraph
from lsst.daf.butler import DimensionUniverse, Butler
butler = Butler('/repo/main')
du = DimensionUniverse()
qgraph = QuantumGraph.loadUri('/home/krughoff/public_html/data/two_ccd_processccd.qgraph', du)
exports = set()
def runner(nodes, exports, visited=None):
    if not visited:
        visited = set()
    for node in nodes:
        if node in visited:
            continue
        exports.update([ref for thing in node.quantum.inputs.values() for ref in thing])
        exports.update([ref for thing in node.quantum.outputs.values() for ref in thing])
        exports.update([ref for ref in node.quantum.initInputs.values()])
        before = qgraph.determineAncestorsOfQuantumNode(node)
        visited.add(node)
        if before:
            runner(before, exports, visited)
runner([node for node in qgraph.getNodesForTask(qgraph.findTaskDefByLabel('calibrate'))], exports)
resolved_refs = [butler.registry.findDataset(datasetType=ex.datasetType, dataId=ex.dataId,
                 collections=butler.registry.queryCollections()) for ex in exports]

with butler.export(filename='export.yaml', directory='rsp_data_export', transfer='copy') as export:
    export.saveDatasets(resolved_refs)
    export.saveCollection("HSC/calib")
    export.saveCollection("HSC/calib/DM-28636")
    export.saveCollection("HSC/calib/gen2/20180117")
    export.saveCollection("HSC/calib/gen2/20180117/unbounded")
Example #21
 def setUp(self):
     self.universe = DimensionUniverse()
Example #22
 def __init__(self):
     self._counter = 0
     self._entries = {}
     self._externalTableRows = {}
     self._externalTableSpecs = {}
     self.dimensions = DimensionUniverse()
Example #23
 def setUp(self):
     config = Config(
         {
             "version": 1,
             "namespace": "pipe_base_test",
             "skypix": {
                 "common": "htm7",
                 "htm": {
                     "class": "lsst.sphgeom.HtmPixelization",
                     "max_level": 24,
                 },
             },
             "elements": {
                 "A": {
                     "keys": [
                         {
                             "name": "id",
                             "type": "int",
                         }
                     ],
                     "storage": {
                         "cls": "lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage",
                     },
                 },
                 "B": {
                     "keys": [
                         {
                             "name": "id",
                             "type": "int",
                         }
                     ],
                     "storage": {
                         "cls": "lsst.daf.butler.registry.dimensions.table.TableDimensionRecordStorage",
                     },
                 },
             },
             "packers": {},
         }
     )
     universe = DimensionUniverse(config=config)
     # need to make a mapping of TaskDef to set of quantum
     quantumMap = {}
     tasks = []
     for task, label in (
         (Dummy1PipelineTask, "R"),
         (Dummy2PipelineTask, "S"),
         (Dummy3PipelineTask, "T"),
         (Dummy4PipelineTask, "U"),
     ):
         config = task.ConfigClass()
         taskDef = TaskDef(get_full_type_name(task), config, task, label)
         tasks.append(taskDef)
         quantumSet = set()
         connections = taskDef.connections
         for a, b in ((1, 2), (3, 4)):
             if connections.initInputs:
                 initInputDSType = DatasetType(
                     connections.initInput.name,
                     tuple(),
                     storageClass=connections.initInput.storageClass,
                     universe=universe,
                 )
                 initRefs = [DatasetRef(initInputDSType, DataCoordinate.makeEmpty(universe))]
             else:
                 initRefs = None
             inputDSType = DatasetType(
                 connections.input.name,
                 connections.input.dimensions,
                 storageClass=connections.input.storageClass,
                 universe=universe,
             )
             inputRefs = [
                 DatasetRef(inputDSType, DataCoordinate.standardize({"A": a, "B": b}, universe=universe))
             ]
             outputDSType = DatasetType(
                 connections.output.name,
                 connections.output.dimensions,
                 storageClass=connections.output.storageClass,
                 universe=universe,
             )
             outputRefs = [
                 DatasetRef(outputDSType, DataCoordinate.standardize({"A": a, "B": b}, universe=universe))
             ]
             quantumSet.add(
                 Quantum(
                     taskName=task.__qualname__,
                     dataId=DataCoordinate.standardize({"A": a, "B": b}, universe=universe),
                     taskClass=task,
                     initInputs=initRefs,
                     inputs={inputDSType: inputRefs},
                     outputs={outputDSType: outputRefs},
                 )
             )
         quantumMap[taskDef] = quantumSet
     self.tasks = tasks
     self.quantumMap = quantumMap
     self.qGraph = QuantumGraph(quantumMap, metadata=METADATA)
     self.universe = universe
Example #24
 def __init__(self):
     self._opaque = DummyOpaqueTableStorageManager()
     self.dimensions = DimensionUniverse()
     self._datastoreBridges = DummyDatastoreRegistryBridgeManager(
         self._opaque, self.dimensions)
Example #25
    def testRegistryConfig(self):
        configFile = os.path.join(TESTDIR, "config", "basic",
                                  "posixDatastore.yaml")
        config = Config(configFile)
        universe = DimensionUniverse()
        self.factory.registerFormatters(config["datastore", "formatters"],
                                        universe=universe)

        # Create a DatasetRef with and without instrument matching the
        # one in the config file.
        dimensions = universe.extract(
            ("visit", "physical_filter", "instrument"))
        sc = StorageClass("DummySC", dict, None)
        refPviHsc = self.makeDatasetRef("pvi",
                                        dimensions,
                                        sc, {
                                            "instrument": "DummyHSC",
                                            "physical_filter": "v"
                                        },
                                        conform=False)
        refPviHscFmt = self.factory.getFormatterClass(refPviHsc)
        self.assertIsFormatter(refPviHscFmt)
        self.assertIn("JsonFormatter", refPviHscFmt.name())

        refPviNotHsc = self.makeDatasetRef("pvi",
                                           dimensions,
                                           sc, {
                                               "instrument": "DummyNotHSC",
                                               "physical_filter": "v"
                                           },
                                           conform=False)
        refPviNotHscFmt = self.factory.getFormatterClass(refPviNotHsc)
        self.assertIsFormatter(refPviNotHscFmt)
        self.assertIn("PickleFormatter", refPviNotHscFmt.name())

        # Create a DatasetRef that should fall back to using Dimensions
        refPvixHsc = self.makeDatasetRef("pvix",
                                         dimensions,
                                         sc, {
                                             "instrument": "DummyHSC",
                                             "physical_filter": "v"
                                         },
                                         conform=False)
        refPvixNotHscFmt = self.factory.getFormatterClass(refPvixHsc)
        self.assertIsFormatter(refPvixNotHscFmt)
        self.assertIn("PickleFormatter", refPvixNotHscFmt.name())

        # Create a DatasetRef that should fall back to using StorageClass
        dimensionsNoV = DimensionGraph(universe,
                                       names=("physical_filter", "instrument"))
        refPvixNotHscDims = self.makeDatasetRef("pvix",
                                                dimensionsNoV,
                                                sc, {
                                                    "instrument": "DummyHSC",
                                                    "physical_filter": "v"
                                                },
                                                conform=False)
        refPvixNotHscDims_fmt = self.factory.getFormatterClass(
            refPvixNotHscDims)
        self.assertIsFormatter(refPvixNotHscDims_fmt)
        self.assertIn("YamlFormatter", refPvixNotHscDims_fmt.name())
Example #26
    def testRegistryConfig(self):
        configFile = os.path.join(TESTDIR, "config", "basic", "posixDatastore.yaml")
        config = Config(configFile)
        universe = DimensionUniverse()
        self.factory.registerFormatters(config["datastore", "formatters"], universe=universe)

        # Create a DatasetRef with and without instrument matching the
        # one in the config file.
        dimensions = universe.extract(("visit", "physical_filter", "instrument"))
        sc = StorageClass("DummySC", dict, None)
        refPviHsc = self.makeDatasetRef("pvi", dimensions, sc, {"instrument": "DummyHSC",
                                                                "physical_filter": "v"},
                                        conform=False)
        refPviHscFmt = self.factory.getFormatterClass(refPviHsc)
        self.assertIsFormatter(refPviHscFmt)
        self.assertIn("JsonFormatter", refPviHscFmt.name())

        refPviNotHsc = self.makeDatasetRef("pvi", dimensions, sc, {"instrument": "DummyNotHSC",
                                                                   "physical_filter": "v"},
                                           conform=False)
        refPviNotHscFmt = self.factory.getFormatterClass(refPviNotHsc)
        self.assertIsFormatter(refPviNotHscFmt)
        self.assertIn("PickleFormatter", refPviNotHscFmt.name())

        # Create a DatasetRef that should fall back to using Dimensions
        refPvixHsc = self.makeDatasetRef("pvix", dimensions, sc, {"instrument": "DummyHSC",
                                                                  "physical_filter": "v"},
                                         conform=False)
        refPvixNotHscFmt = self.factory.getFormatterClass(refPvixHsc)
        self.assertIsFormatter(refPvixNotHscFmt)
        self.assertIn("PickleFormatter", refPvixNotHscFmt.name())

        # Create a DatasetRef that should fall back to using StorageClass
        dimensionsNoV = DimensionGraph(universe, names=("physical_filter", "instrument"))
        refPvixNotHscDims = self.makeDatasetRef("pvix", dimensionsNoV, sc, {"instrument": "DummyHSC",
                                                                            "physical_filter": "v"},
                                                conform=False)
        refPvixNotHscDims_fmt = self.factory.getFormatterClass(refPvixNotHscDims)
        self.assertIsFormatter(refPvixNotHscDims_fmt)
        self.assertIn("YamlFormatter", refPvixNotHscDims_fmt.name())

        # Check that parameters are stored
        refParam = self.makeDatasetRef("paramtest", dimensions, sc, {"instrument": "DummyNotHSC",
                                                                     "physical_filter": "v"},
                                       conform=False)
        lookup, refParam_fmt, kwargs = self.factory.getFormatterClassWithMatch(refParam)
        self.assertIn("writeParameters", kwargs)
        expected = {"max": 5, "min": 2, "comment": "Additional commentary", "recipe": "recipe1"}
        self.assertEqual(kwargs["writeParameters"], expected)
        self.assertIn("FormatterTest", refParam_fmt.name())

        f = self.factory.getFormatter(refParam, self.fileDescriptor)
        self.assertEqual(f.writeParameters, expected)

        f = self.factory.getFormatter(refParam, self.fileDescriptor, writeParameters={"min": 22,
                                                                                      "extra": 50})
        self.assertEqual(f.writeParameters, {"max": 5, "min": 22, "comment": "Additional commentary",
                                             "extra": 50, "recipe": "recipe1"})

        self.assertIn("recipe1", f.writeRecipes)
        self.assertEqual(f.writeParameters["recipe"], "recipe1")

        with self.assertRaises(ValueError):
            # "new" is not allowed as a write parameter
            self.factory.getFormatter(refParam, self.fileDescriptor, writeParameters={"new": 1})

        with self.assertRaises(RuntimeError):
            # "mode" is a required recipe parameter
            self.factory.getFormatter(refParam, self.fileDescriptor, writeRecipes={"recipe3": {"notmode": 1}})