Example #1
0
 def setUp(self):
     """Create a temporary metric test repo and a CLI runner for each
     test."""
     configFile = os.path.join(TESTDIR, "config/basic/butler.yaml")
     self.root = makeTestTempDir(TESTDIR)
     self.testRepo = MetricTestRepo(self.root, configFile=configFile)
     self.runner = LogCliRunner()
Example #2
0
 def _createRepo(cls):
     """Create a repository by invoking the ``butler create`` subcommand
     through the Click testing machinery."""
     result = LogCliRunner().invoke(butlerCli, ["create", cls.root])
     # This is a classmethod, so self.assertEqual is unavailable; fall
     # back to a plain assert with a diagnostic message.
     assert result.exit_code == 0, f"output: {result.output} exception: {result.exception}"
Example #3
0
 def test_callMock(self):
     """Verify that invoking a mocked subcommand routes the call to the
     Mocker with the expected keyword arguments."""
     runner = LogCliRunner(env=mockEnvVar)
     invocation = runner.invoke(butler.cli, ["create", "repo"])
     self.assertEqual(invocation.exit_code, 0, clickResultMsg(invocation))
     Mocker.mock.assert_called_with(repo="repo", seed_config=None,
                                    standalone=False, override=False,
                                    outfile=None)
Example #4
0
 def _registerInstrument(cls):
     """Register the instrument by invoking the
     ``butler register-instrument`` subcommand through the Click testing
     machinery."""
     args = ["register-instrument", cls.root, cls.instrumentClassName]
     result = LogCliRunner().invoke(butlerCli, args)
     # Classmethod: self.assertEqual is unavailable, use a plain assert.
     assert result.exit_code == 0, f"output: {result.output} exception: {result.exception}"
Example #5
0
 def _writeCuratedCalibrations(self):
     """Write curated calibrations by invoking the
     ``butler write-curated-calibrations`` subcommand through the Click
     testing machinery."""
     args = ["write-curated-calibrations", self.root, self.instrumentName]
     result = LogCliRunner().invoke(butlerCli, args)
     msg = f"output: {result.output} exception: {result.exception}"
     self.assertEqual(result.exit_code, 0, msg)
Example #6
0
    def testPhotodiode(self):
        """Ingesting photodiode data into a repo containing the matching
        exposure information should succeed.
        """
        # First ingest a raw so the matching exposure record exists.
        outputRun = "raw_ingest_" + self.id()
        ingestArgs = [
            "ingest-raws",
            self.root,
            self.file,
            "--output-run",
            outputRun,
            "--ingest-task",
            self.rawIngestTask,
        ]
        result = LogCliRunner().invoke(butlerCli, ingestArgs)
        self.assertEqual(
            result.exit_code, 0,
            f"output: {result.output} exception: {result.exception}")

        # Now ingest the photodiode data that matches that exposure.
        pdArgs = [
            "ingest-photodiode",
            self.root,
            self.instrumentClassName,
            self.pdPath,
        ]
        result = LogCliRunner().invoke(butlerCli, pdArgs)
        self.assertEqual(
            result.exit_code, 0,
            f"output: {result.output} exception: {result.exception}")

        # The ingested photodiode must be retrievable and have the
        # correct type.
        butler = Butler(self.root, run="LSSTCam/calib/photodiode")
        calib = butler.get('photodiode', dataId=self.dataIds[0])
        self.assertIsInstance(calib, PhotodiodeCalib)
    def testQueryDatasetTypes(self):
        """Register a dataset type, then exercise ``query-dataset-types``
        and ``remove-dataset-type`` through the command line."""
        self.maxDiff = None
        datasetName = "test"
        instrumentDimension = "instrument"
        visitDimension = "visit"
        storageClassName = "testDatasetType"
        expectedNotVerbose = AstropyTable((("test", ), ), names=("name", ))
        runner = LogCliRunner()
        with runner.isolated_filesystem():
            # Build a repo and register a dataset type to query for.
            butler = Butler(Butler.makeRepo("here"), writeable=True)
            storageClass = StorageClass(storageClassName)
            butler.registry.storageClasses.registerStorageClass(storageClass)
            dimensions = butler.registry.dimensions.extract(
                (instrumentDimension, visitDimension))
            butler.registry.registerDatasetType(
                DatasetType(datasetName, dimensions, storageClass))

            # Not-verbose output:
            result = runner.invoke(cli, ["query-dataset-types", "here"])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))
            self.assertAstropyTablesEqual(readTable(result.output),
                                          expectedNotVerbose)

            # Glob-pattern output:
            result = runner.invoke(cli, ["query-dataset-types", "here", "t*"])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))
            self.assertAstropyTablesEqual(readTable(result.output),
                                          expectedNotVerbose)

            # Verbose output:
            result = runner.invoke(
                cli, ["query-dataset-types", "here", "--verbose"])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))
            expected = AstropyTable(array((
                "test",
                "['band', 'instrument', 'physical_filter', 'visit_system', 'visit']",
                "testDatasetType")),
                                    names=("name", "dimensions",
                                           "storage class"))
            self.assertAstropyTablesEqual(readTable(result.output), expected)

            # Removing a non-existent dataset type still exits cleanly.
            result = runner.invoke(cli,
                                   ["remove-dataset-type", "here", "unreal"])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))

            # Remove the dataset type registered above.
            result = runner.invoke(
                cli, ["remove-dataset-type", "here", datasetName])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))

            # And confirm it is gone.
            result = runner.invoke(cli, ["query-dataset-types", "here"])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))
            self.assertIn("No results", result.output)
 def testGetCollections(self):
     """Creating a butler with a run and a tag should make both
     collections visible to ``query-collections``."""
     runName = "ingest/run"
     tagName = "ingest"
     runner = LogCliRunner()
     with runner.isolated_filesystem():
         repoConfig = Butler.makeRepo("here")
         # Instantiating the Butler creates the run and tag collections.
         _ = Butler(repoConfig, run=runName, tags=[tagName],
                    collections=[tagName])
         result = runner.invoke(cli, ["query-collections", "here"])
         self.assertEqual({"collections": [runName, tagName]},
                          yaml.safe_load(result.output))
Example #9
0
    def setUp(self):
        """Setup for lightweight photodiode ingest task.

        Makes a temporary repo and registers the instrument in it.
        """
        self.root = tempfile.mkdtemp(dir=self.ingestDir)

        # Create the repository.
        result = LogCliRunner().invoke(butlerCli, ["create", self.root])
        self.assertEqual(
            result.exit_code, 0,
            f"output: {result.output} exception: {result.exception}")

        # Register the instrument in the new repository.
        result = LogCliRunner().invoke(
            butlerCli,
            ["register-instrument", self.root, self.instrumentClassName])
        self.assertEqual(
            result.exit_code, 0,
            f"output: {result.output} exception: {result.exception}")
Example #10
0
 def testRetrieveSubset(self):
     """Retrieving artifacts with a ``--where`` restriction should yield
     only the matching files."""
     runner = LogCliRunner()
     with runner.isolated_filesystem():
         destdir = "tmp1/"
         args = [
             "retrieve-artifacts", self.root, destdir, "--where",
             "instrument='DummyCamComp' AND visit=423"
         ]
         result = runner.invoke(cli, args)
         self.assertEqual(result.exit_code, 0, clickResultMsg(result))
         # The command reports the number of artifacts it transferred.
         self.assertTrue(result.stdout.endswith(": 3\n"),
                         f"Expected 3 got: {result.stdout}")
         artifacts = self.find_files(destdir)
         self.assertEqual(len(artifacts), 3,
                          f"Expected 3 artifacts: {artifacts}")
Example #11
0
 def testPhotodiodeFailure(self):
     """Photodiode ingest must fail when the repo is missing the matching
     exposure information.
     """
     args = [
         "ingest-photodiode",
         self.root,
         self.instrumentClassName,
         self.pdPath,
     ]
     result = LogCliRunner().invoke(butlerCli, args)
     # Exit code 1 signals the expected ingest failure.
     self.assertEqual(
         result.exit_code, 1,
         f"output: {result.output} exception: {result.exception}")
Example #12
0
    def testClobber(self):
        """Retrieving artifacts twice into the same directory must fail
        unless ``--clobber`` is given."""
        runner = LogCliRunner()
        with runner.isolated_filesystem():
            destdir = "tmp2/"
            baseArgs = ["retrieve-artifacts", self.root, destdir]

            # The first retrieval succeeds.
            result = runner.invoke(cli, baseArgs)
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))

            # A second retrieval into the same directory fails.
            result = runner.invoke(cli, baseArgs)
            self.assertNotEqual(result.exit_code, 0, clickResultMsg(result))

            # Unless --clobber is supplied.
            result = runner.invoke(cli, baseArgs + ["--clobber"])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))
Example #13
0
    def test_help(self):
        """Tests `utils.addArgumentHelp` and its use in repo_argument and
        directory_argument; verifies that the argument help gets added to the
        command fucntion help, and that it's added in the correct order. See
        addArgumentHelp for more details."""
        runner = LogCliRunner()
        result = runner.invoke(ArgumentHelpGeneratorTestCase.cli, ["--help"])
        expected = """Usage: cli [OPTIONS] REPO DIRECTORY

  The cli help message.

  repo help text

  directory help text

Options:
  --help  Show this message and exit.
"""
        self.assertIn(expected, result.output)
 def testQueryDatasetTypes(self):
     """Register a dataset type and verify the yaml output of
     ``query-dataset-types`` in both normal and verbose modes."""
     self.maxDiff = None
     datasetName = "test"
     instrumentDimension = "instrument"
     visitDimension = "visit"
     storageClassName = "testDatasetType"
     expectedNotVerbose = {"datasetTypes": [datasetName]}
     runner = LogCliRunner()
     with runner.isolated_filesystem():
         # Build a repo and register a dataset type to query for.
         butler = Butler(Butler.makeRepo("here"), writeable=True)
         storageClass = StorageClass(storageClassName)
         butler.registry.storageClasses.registerStorageClass(storageClass)
         dimensions = butler.registry.dimensions.extract(
             (instrumentDimension, visitDimension))
         butler.registry.registerDatasetType(
             DatasetType(datasetName, dimensions, storageClass))

         # Not-verbose output:
         result = runner.invoke(cli, ["query-dataset-types", "here"])
         self.assertEqual(result.exit_code, 0, clickResultMsg(result))
         self.assertEqual(expectedNotVerbose, yaml.safe_load(result.output))

         # Glob-pattern output:
         result = runner.invoke(cli, ["query-dataset-types", "here", "t*"])
         self.assertEqual(result.exit_code, 0, clickResultMsg(result))
         self.assertEqual(expectedNotVerbose, yaml.safe_load(result.output))

         # Verbose output. The reported dimensions include all required
         # dimensions (more than were registered), so verify the expected
         # components individually rather than comparing whole documents.
         result = runner.invoke(
             cli, ["query-dataset-types", "here", "--verbose"])
         self.assertEqual(result.exit_code, 0, clickResultMsg(result))
         datasetTypeInfo = yaml.safe_load(result.output)["datasetTypes"][0]
         self.assertEqual(datasetTypeInfo["name"], datasetName)
         self.assertEqual(datasetTypeInfo["storageClass"], storageClassName)
         self.assertIn(instrumentDimension, datasetTypeInfo["dimensions"])
         self.assertIn(visitDimension, datasetTypeInfo["dimensions"])
Example #15
0
    def _ingestRaws(self, transfer, file=None):
        """Ingest raws with the butler command line via the Click testing
        machinery.

        Parameters
        ----------
        transfer : `str`
            The external data transfer type.
        file : `str`, optional
            Path to a file to ingest instead of the default associated with
            the object.
        """
        args = [
            "ingest-raws", self.root, file if file is not None else self.file,
            "--output-run", self.outputRun, "--transfer", transfer,
            "--ingest-task", self.rawIngestTask
        ]
        result = LogCliRunner().invoke(butlerCli, args)
        self.assertEqual(
            result.exit_code, 0,
            f"output: {result.output} exception: {result.exception}")
Example #16
0
    def testRetrieveAll(self):
        """Retrieve every artifact, once with and once without path
        preservation."""
        runner = LogCliRunner()
        with runner.isolated_filesystem():
            # Preserving the path keeps the run directory and the "." in
            # the component name; without it the filename uses an
            # underscore instead of a dot.
            cases = (("--preserve-path", "ingest/run/test_metric_comp."),
                     ("--no-preserve-path", "test_metric_comp_"))
            for counter, (preserve_path, prefix) in enumerate(cases):
                destdir = f"tmp{counter}/"
                result = runner.invoke(
                    cli,
                    ["retrieve-artifacts", self.root, destdir, preserve_path])
                self.assertEqual(result.exit_code, 0, clickResultMsg(result))
                self.assertTrue(result.stdout.endswith(": 6\n"),
                                f"Expected 6 got: {result.stdout}")

                artifacts = self.find_files(destdir)
                self.assertEqual(len(artifacts), 6,
                                 f"Expected 6 artifacts: {artifacts}")
                self.assertIn(f"{destdir}{prefix}", str(artifacts[1]))
Example #17
0
 def test_cli(self):
     """Run create, register-instrument and write-curated-calibrations
     end to end through the command line."""
     runner = LogCliRunner()
     with runner.isolated_filesystem():
         result = runner.invoke(butler.cli, ["create", "here"])
         self.assertEqual(
             result.exit_code, 0,
             f"output: {result.output} exception: {result.exception}")

         registerInstrumentArgs = [
             "register-instrument", "here", self.instrumentClassName
         ]
         # A second instrument may be registered in the same invocation.
         if self.secondInstrumentClassName is not None:
             registerInstrumentArgs.append(self.secondInstrumentClassName)
         result = runner.invoke(butler.cli, registerInstrumentArgs)
         self.assertEqual(
             result.exit_code, 0,
             f"output: {result.output} exception: {result.exception}")

         writeCalibsArgs = [
             "write-curated-calibrations", "here", self.instrumentName,
             "--collection", "collection"
         ]
         result = runner.invoke(butler.cli, writeCalibsArgs)
         self.assertEqual(
             result.exit_code, 0,
             f"output: {result.output} exception: {result.exception}")
 def setUp(self):
     """Create a fresh CLI runner for each test."""
     self.runner = LogCliRunner()
    def run_test(self,
                 mockPruneDatasets,
                 mockQueryDatasets_init,
                 mockQueryDatasets_getDatasets,
                 mockQueryDatasets_getTables,
                 cliArgs,
                 exMsgs,
                 exPruneDatasetsCallArgs,
                 exGetTablesCalled,
                 exQueryDatasetsCallArgs,
                 invokeInput=None,
                 exPruneDatasetsExitCode=0):
        """Execute the test.

        Makes a temporary repo, invokes ``prune-datasets``. Verifies expected
        output, exit codes, and mock calls.

        Parameters
        ----------
        mockPruneDatasets : `MagicMock`
            The MagicMock for the ``Butler.pruneDatasets`` function.
        mockQueryDatasets_init : `MagicMock`
            The MagicMock for the ``QueryDatasets.__init__`` function.
        mockQueryDatasets_getDatasets : `MagicMock`
            The MagicMock for the ``QueryDatasets.getDatasets`` function.
        mockQueryDatasets_getTables : `MagicMock`
            The MagicMock for the ``QueryDatasets.getTables`` function.
        cliArgs : `list` [`str`]
            The arguments to pass to the command line. Do not include the
            subcommand name or the repo.
        exMsgs : `list` [`str`] or `None`
            A list of text fragments that should appear in the text output
            after calling the CLI command, or None if no output should be
            produced.
        exPruneDatasetsCallArgs : `dict` [`str`, `Any`]
            The arguments that ``Butler.pruneDatasets`` should have been called
            with, or None if that function should not have been called.
        exGetTablesCalled : bool
            `True` if ``QueryDatasets.getTables`` should have been called, else
            `False`.
        exQueryDatasetsCallArgs : `dict` [`str`, `Any`]
            The arguments that ``QueryDatasets.__init__`` should have been
            called with, or `None` if the function should not have been called.
        invokeInput : `str`, optional
            A string to pass to the ``CliRunner.invoke`` `input` argument. By
            default None.
        exPruneDatasetsExitCode : `int`
            The expected exit code returned from invoking ``prune-datasets``.
        """
        runner = LogCliRunner()
        with runner.isolated_filesystem():
            # Make a repo so a butler can be created
            result = runner.invoke(butlerCli, ["create", self.repo])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))

            # Run the prune-datasets CLI command, this will call all of our
            # mocks:
            cliArgs = ["prune-datasets", self.repo] + cliArgs
            result = runner.invoke(butlerCli, cliArgs, input=invokeInput)
            self.assertEqual(result.exit_code, exPruneDatasetsExitCode, clickResultMsg(result))

            # Verify the Butler.pruneDatasets was called exactly once with
            # expected arguments. The datasets argument is the value returned
            # by QueryDatasets, which we've mocked with side effect
            # ``getDatasets()``.
            if exPruneDatasetsCallArgs:
                mockPruneDatasets.assert_called_once_with(**exPruneDatasetsCallArgs)
            else:
                mockPruneDatasets.assert_not_called()

            # Less critical, but do a quick verification that the QueryDataset
            # member function mocks were called, in this case we expect one
            # time each.
            if exQueryDatasetsCallArgs:
                mockQueryDatasets_init.assert_called_once_with(**exQueryDatasetsCallArgs)
            else:
                mockQueryDatasets_init.assert_not_called()
            # If Butler.pruneDatasets was not called, then
            # QueryDatasets.getDatasets also does not get called.
            if exPruneDatasetsCallArgs:
                mockQueryDatasets_getDatasets.assert_called_once()
            else:
                mockQueryDatasets_getDatasets.assert_not_called()
            if exGetTablesCalled:
                mockQueryDatasets_getTables.assert_called_once()
            else:
                mockQueryDatasets_getTables.assert_not_called()

            if exMsgs is None:
                self.assertEqual("", result.output)
            else:
                for expectedMsg in exMsgs:
                    self.assertIn(expectedMsg, result.output)
Example #20
0
 def setUp(self):
     """Create a CLI runner and the repo path used by each test."""
     self.runner = LogCliRunner()
     # All tests in this case operate on a repo named "here".
     self.repo = "here"
Example #21
0
    def testShowPipeline(self):
        """Test showing the pipeline."""
        class ShowInfo:
            def __init__(self, show, expectedOutput):
                self.show = show
                self.expectedOutput = expectedOutput

            def __repr__(self):
                return f"ShowInfo({self.show}, {self.expectedOutput}"

        testdata = [
            ShowInfo(
                "pipeline", """description: anonymous
tasks:
  task:
    class: lsst.pipe.base.tests.simpleQGraph.AddTask
    config:
    - addend: '100'"""),
            ShowInfo(
                "config", """### Configuration for task `task'
# Flag to enable/disable metadata saving for a task, enabled by default.
config.saveMetadata=True

# amount to add
config.addend=100

# name for connection input
config.connections.input='add_dataset{in_tmpl}'

# name for connection output
config.connections.output='add_dataset{out_tmpl}'

# name for connection output2
config.connections.output2='add2_dataset{out_tmpl}'

# name for connection initout
config.connections.initout='add_init_output{out_tmpl}'

# Template parameter used to format corresponding field template parameter
config.connections.in_tmpl='_in'

# Template parameter used to format corresponding field template parameter
config.connections.out_tmpl='_out'"""),

            # history will contain machine-specific paths, TBD how to verify
            ShowInfo("history=task::addend", None),
            ShowInfo("tasks", "### Subtasks for task `AddTask'")
        ]

        for showInfo in testdata:
            runner = LogCliRunner()
            result = runner.invoke(pipetaskCli, [
                "build", "--task",
                "lsst.pipe.base.tests.simpleQGraph.AddTask:task", "--config",
                "task:addend=100", "--show", showInfo.show
            ])
            self.assertEqual(result.exit_code, 0, clickResultMsg(result))
            if showInfo.expectedOutput is not None:
                self.assertIn(showInfo.expectedOutput,
                              result.output,
                              msg=f"for {showInfo}")