def testConstructor(self):
        """Test that a DataId can be built from a plain dict of values."""
        values = {
            "instrument": "DummyInstrument",
            "detector": 1,
            "visit": 2,
            "physical_filter": "i",
        }
        dataId = DataId(values, universe=self.universe)
        # Every key supplied at construction should be present, and no others.
        self.assertEqual(len(dataId), 4)
        self.assertCountEqual(dataId.keys(), list(values))
# Example 2
def _refFromConnection(butler: Butler, connection: DimensionedConnection,
                       dataId: DataId, **kwargs: Any) -> DatasetRef:
    """Create a DatasetRef for a connection in a collection.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The collection to point to.
    connection : `lsst.pipe.base.connectionTypes.DimensionedConnection`
        The connection defining the dataset type to point to.
    dataId
        The data ID for the dataset to point to.
    **kwargs
        Additional keyword arguments used to augment or construct
        a `~lsst.daf.butler.DataCoordinate`.

    Returns
    -------
    ref : `lsst.daf.butler.DatasetRef`
        A reference to a dataset compatible with ``connection``, with ID
        ``dataId``, in the collection pointed to by ``butler``.

    Raises
    ------
    ValueError
        If the data ID's dimensions do not match the connection's, the
        dataset type is not registered, or the data ID is incompatible
        with the dataset type.
    """
    universe = butler.registry.dimensions
    # DatasetRef only tests if required dimension is missing, but not extras
    _checkDimensionsMatch(universe, set(connection.dimensions), dataId.keys())
    dataId = DataCoordinate.standardize(dataId, **kwargs, universe=universe)

    # skypix is a PipelineTask alias for "some spatial index", Butler doesn't
    # understand it. Code copied from TaskDatasetTypes.fromTaskDef
    if "skypix" in connection.dimensions:
        datasetType = butler.registry.getDatasetType(connection.name)
    else:
        datasetType = connection.makeDatasetType(universe)

    try:
        butler.registry.getDatasetType(datasetType.name)
    except KeyError as e:
        # Chain the original lookup failure so the traceback shows why the
        # dataset type was rejected (matches the handler below).
        raise ValueError(f"Invalid dataset type {connection.name}.") from e
    try:
        ref = DatasetRef(datasetType=datasetType, dataId=dataId)
        return ref
    except KeyError as e:
        raise ValueError(
            f"Dataset type ({connection.name}) and ID {dataId.byName()} not compatible."
        ) from e
# Example 3
def makeQuantum(
    task: PipelineTask,
    butler: Butler,
    dataId: DataId,
    ioDataIds: Mapping[str, Union[DataId, Sequence[DataId]]],
) -> Quantum:
    """Create a Quantum for a particular data ID(s).

    Parameters
    ----------
    task : `lsst.pipe.base.PipelineTask`
        The task whose processing the quantum represents.
    butler : `lsst.daf.butler.Butler`
        The collection the quantum refers to.
    dataId : any data ID type
        The data ID of the quantum. Must have the same dimensions as
        ``task``'s connections class.
    ioDataIds : `collections.abc.Mapping` [`str`]
        A mapping keyed by input/output names. Values must be data IDs for
        single connections and sequences of data IDs for multiple connections.

    Returns
    -------
    quantum : `lsst.daf.butler.Quantum`
        A quantum for ``task``, when called with ``dataIds``.

    Raises
    ------
    ValueError
        If the quantum's dimensions or any connection's data IDs are
        invalid.
    """
    connections = task.config.ConnectionsClass(config=task.config)

    try:
        _checkDimensionsMatch(butler.registry.dimensions,
                              connections.dimensions, dataId.keys())
    except ValueError as e:
        raise ValueError("Error in quantum dimensions.") from e

    # Inputs (regular and prerequisite) and outputs get identical handling;
    # only the set of connection names differs.
    inputs = _resolveConnectionRefs(
        butler, connections,
        itertools.chain(connections.inputs, connections.prerequisiteInputs),
        ioDataIds)
    outputs = _resolveConnectionRefs(
        butler, connections, connections.outputs, ioDataIds)

    quantum = Quantum(
        taskClass=type(task),
        dataId=DataCoordinate.standardize(dataId,
                                          universe=butler.registry.dimensions),
        inputs=inputs,
        outputs=outputs,
    )
    return quantum


def _resolveConnectionRefs(butler, connections, names, ioDataIds):
    """Build dataset refs for each named connection, grouped by dataset type.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The collection the refs point into.
    connections
        The task's connections instance; each name in ``names`` is looked up
        as an attribute on it.
    names : iterable of `str`
        The connection names to resolve.
    ioDataIds : `collections.abc.Mapping` [`str`]
        A mapping from connection name to data ID(s).

    Returns
    -------
    refs : `collections.defaultdict` [dataset type, `list`]
        The resolved refs for each dataset type.

    Raises
    ------
    ValueError
        If any connection's data IDs are missing, have the wrong
        multiplicity, or are otherwise invalid.
    """
    refs = defaultdict(list)
    for name in names:
        try:
            # getattr is the idiomatic form of __getattribute__(name).
            connection = getattr(connections, name)
            _checkDataIdMultiplicity(name, ioDataIds[name],
                                     connection.multiple)
            for connectionId in _normalizeDataIds(ioDataIds[name]):
                ref = _refFromConnection(butler, connection, connectionId)
                refs[ref.datasetType].append(ref)
        except (ValueError, KeyError) as e:
            raise ValueError(f"Error in connection {name}.") from e
    return refs