def _runMetricTasks(config, dataRefs):
    """Run MetricControllerTask on a single dataset.

    Parameters
    ----------
    config : `lsst.verify.gen2tasks.MetricsControllerConfig`
        The config for running `~lsst.verify.gen2tasks.MetricsControllerTask`.
    dataRefs : `list` [`lsst.daf.persistence.ButlerDataRef`]
        The data references over which to compute metrics. The granularity
        determines the metric granularity; see
        `MetricsControllerTask.runDataRefs` for more details.
    """
    allMetricTasks = MetricsControllerTask(config)
    allMetricTasks.runDataRefs([_sanitizeRef(ref) for ref in dataRefs])
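# A minimal usage sketch for _runMetricTasks, assuming a Gen 2 butler and
# placeholder config/dataset names (the real call sites live in the ap_verify
# driver, which is not shown here):
#
#     config = _getMetricsConfig(None, "default_metrics.py")
#     _runMetricTasks(config, list(butler.subset("calexp")))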
def _getMetricsConfig(userFile, defaultFile, metricsOutputTemplate=None):
    """Load a metrics config based on program settings.

    Parameters
    ----------
    userFile : `str` or `None`
        The path provided by the user for this config file.
    defaultFile : `str`
        The filename (not a path) of the default config file.
    metricsOutputTemplate : `str` or `None`
        The file name template for the metrics output files. If not `None`,
        this argument overrides any output template set by either config
        file.

    Returns
    -------
    config : `lsst.verify.gen2tasks.MetricsControllerConfig`
        The config from ``userFile`` if the user provided one, otherwise the
        default config.
    """
    timingConfig = MetricsControllerTask.ConfigClass()

    if userFile is not None:
        timingConfig.load(userFile)
    else:
        timingConfig.load(os.path.join(lsst.utils.getPackageDir("ap_verify"),
                                       "config", defaultFile))
    if metricsOutputTemplate:
        timingConfig.jobFileTemplate = metricsOutputTemplate
    return timingConfig
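# A precedence sketch for _getMetricsConfig above (file names and the template
# string are placeholders): an explicit user file wins over the packaged
# default, and metricsOutputTemplate overrides the output template from either
# file.
#
#     config = _getMetricsConfig("my_metrics.py", "default_metrics.py",
#                                metricsOutputTemplate="metrics.{id}.verify.json")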
class MetricsControllerTestSuite(lsst.utils.tests.TestCase):

    def setUp(self):
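        # One standalone measurer ("demoMetric") plus a repeated measurer
        # ("repeatedMetric") configured with two differently tuned sub-metrics.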
        self.config = MetricsControllerTask.ConfigClass()
        self.config.metadataAdder.retarget(_TestMetadataAdder)
        self.config.measurers = ["demoMetric", "repeatedMetric"]

        self.config.measurers["demoMetric"].multiplier = 2.0
        repeated = self.config.measurers["repeatedMetric"]
        repeated.configs["first"] = DemoMetricConfig()
        repeated.configs["first"].metric = _extraMetricName1()
        repeated.configs["second"] = DemoMetricConfig()
        repeated.configs["second"].metric = _extraMetricName2()
        repeated.configs["second"].multiplier = 3.4

        self.task = MetricsControllerTask(self.config)

    def _allMetricTaskConfigs(self):
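        """Return a flat list of the metric task configs defined in `setUp`,
        expanding the repeated measurer into its sub-configs.
        """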
        configs = []
        for name, topConfig in zip(self.config.measurers.names,
                                   self.config.measurers.active):
            if name != "repeatedMetric":
                configs.append(topConfig)
            else:
                configs.extend(topConfig.configs.values())
        return configs

    def _checkMetric(self, mockWriter, datarefs, unitsOfWork):
        """Standardized test battery for running a metric.

        Parameters
        ----------
        mockWriter : `unittest.mock.MagicMock`
            A queryable placeholder for `lsst.verify.Job.write`.
        datarefs : `list` of `lsst.daf.persistence.ButlerDataRef`
            The inputs to `MetricsControllerTask.runDataRefs`.
        unitsOfWork : `list` of `int`
            The number of science pipeline units of work (i.e., CCD-visit
            pairs) that should be combined to make a metric for each element
            of ``datarefs``.
        """
        if len(datarefs) != len(unitsOfWork):
            raise ValueError("Test requires matching datarefs "
                             "and unitsOfWork")

        jobs = self.task.runDataRefs(datarefs).jobs
        self.assertEqual(len(jobs), len(datarefs))
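        # Each job should hold one measurement per configured metric, with its
        # value scaled by the number of units of work its data ref covers.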
        for job, dataref, nTimings in zip(jobs, datarefs, unitsOfWork):
            taskConfigs = self._allMetricTaskConfigs()
            self.assertEqual(len(job.measurements), len(taskConfigs))
            for metricName, metricConfig in zip(job.measurements, taskConfigs):
                self.assertEqual(metricName, Name(metricConfig.metric))
                assert_quantity_allclose(
                    job.measurements[metricConfig.metric].quantity,
                    metricConfig.multiplier * float(nTimings) * u.second)

            self.assertTrue(job.meta["tested"])

        # Exact arguments to Job.write are implementation detail, don't test
        if not jobs:
            mockWriter.assert_not_called()
        elif len(jobs) == 1:
            mockWriter.assert_called_once()
        else:
            mockWriter.assert_called()

    def testCcdGrainedMetric(self, mockWriter, _mockButler,
                             _mockMetricsLoader):
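        """Test metrics computed at CCD granularity: a visit+ccd data ref
        covers a single unit of work.
        """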
        dataId = {"visit": 42, "ccd": 101, "filter": "k"}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[1])

    def testVisitGrainedMetric(self, mockWriter, _mockButler,
                               _mockMetricsLoader):
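        """Test metrics computed at visit granularity: a visit-only data ref
        aggregates all of that visit's CCDs (two units of work here).
        """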
        dataId = {"visit": 42, "filter": "k"}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[2])

    def testDatasetGrainedMetric(self, mockWriter, _mockButler,
                                 _mockMetricsLoader):
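        """Test metrics computed at dataset granularity: an empty data ID
        aggregates the entire mocked repository (six units of work) into a
        single job.
        """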
        dataId = {}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[6])

    def testMultipleMetrics(self, mockWriter, _mockButler,
                            _mockMetricsLoader):
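        """Test that multiple data refs each produce an independent job with
        the full set of configured metrics.
        """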
        dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                   {"visit": 42, "ccd": 102, "filter": "k"}]
        datarefs = [_makeMockDataref(dataId) for dataId in dataIds]
        self._checkMetric(mockWriter, datarefs,
                          unitsOfWork=[1] * len(dataIds))

    def testInvalidMetricSegregation(self, _mockWriter, _mockButler,
                                     _mockMetricsLoader):
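        """Test that a MetricComputationError for one data ref leaves that
        job empty without affecting the jobs for other data refs.
        """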
        self.config.measurers = ["demoMetric"]
        self.task = MetricsControllerTask(self.config)
        with unittest.mock.patch.object(_DemoMetricTask,
                                        "adaptArgsAndRun") as mockCall:
            # _DemoMetricTask runs twice: the first call raises
            # MetricComputationError, and the second (unittest.mock.DEFAULT)
            # falls through to return_value below.
            mockCall.side_effect = (MetricComputationError,
                                    unittest.mock.DEFAULT)
            expectedValue = 1.0 * u.second
            mockCall.return_value = Struct(measurement=lsst.verify.Measurement(
                _metricName(), expectedValue))

            dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                       {"visit": 42, "ccd": 102, "filter": "k"}]
            datarefs = [_makeMockDataref(dataId) for dataId in dataIds]

            jobs = self.task.runDataRefs(datarefs).jobs
            self.assertEqual(len(jobs), len(datarefs))

            # Failed job
            self.assertEqual(len(jobs[0].measurements), 0)

            # Successful job
            self.assertTrue(jobs[1].meta["tested"])
            self.assertEqual(len(jobs[1].measurements), 1)
            assert_quantity_allclose(
                jobs[1].measurements[_metricName()].quantity,
                expectedValue)

    def testNoData(self, mockWriter, _mockButler, _mockMetricsLoader):
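        """Test that running on no data refs produces no jobs and writes
        nothing.
        """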
        datarefs = []
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[])

    def testBadMetric(self, _mockWriter, _mockButler, _mockMetricsLoader):
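        """Test that naming an unregistered measurer raises
        FieldValidationError at config time.
        """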
        with self.assertRaises(FieldValidationError):
            self.config.measurers = ["totallyAndDefinitelyNotARealMetric"]

    def testCustomMetadata(self, _mockWriter, _mockButler, _mockMetricsLoader):
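        """Test that metadata passed to runDataRefs appears in every job,
        alongside the metadata added by the metadata adder subtask.
        """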
        dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                   {"visit": 42, "ccd": 102, "filter": "k"}]
        datarefs = [_makeMockDataref(dataId) for dataId in dataIds]
        extraMetadata = {"test_protocol": 42}
        jobs = self.task.runDataRefs(datarefs, extraMetadata).jobs

        for job in jobs:
            self.assertTrue(job.meta["tested"])
            self.assertEqual(job.meta["test_protocol"],
                             extraMetadata["test_protocol"])