Example #1
    def testForwardToPython(self):
        """Test that `lsst.log` log messages can be forwarded to `logging`."""
        log.configure()

        # Without forwarding we only get python logger messages captured
        with self.assertLogs(level="WARNING") as cm:
            log.warn("lsst.log warning message that will not be forwarded to Python")
            logging.warning("Python logging message that will be captured")
        self.assertEqual(len(cm.output), 1)

        log.usePythonLogging()

        # With forwarding we get 2 logging messages captured
        with self.assertLogs(level="WARNING") as cm:
            log.warn("This is a warning from lsst log meant for python logging")
            logging.warning("Python warning log message to be captured")
        self.assertEqual(len(cm.output), 2)

        loggername = "newlogger"
        log2 = log.Log.getLogger(loggername)
        with self.assertLogs(level="INFO", logger=loggername):
            log2.info("Info message to non-root lsst logger")

        # Check that debug and info are working properly
        # This test should return a single log message
        with self.assertLogs(level="INFO", logger=loggername) as cm:
            log2.info("Second INFO message to non-root lsst logger")
            log.debug("Debug message to root lsst logger")

        self.assertEqual(len(cm.output), 1, f"Got output: {cm.output}")

        logging.shutdown()
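A minimal standalone sketch of the forwarding pattern this test exercises (assuming `lsst.log` is importable; the message text is illustrative):

import logging

import lsst.log as log

logging.basicConfig(level=logging.INFO)   # route Python records to stderr
log.configure()                           # default log4cxx configuration
log.usePythonLogging()                    # forward lsst.log records to `logging`
log.warn("handled by the Python logging module from now on")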
Example #2
    def test_readInFits(self):
        credFile = os.path.expanduser('~/.mysqlAuthLSST')
        if not os.path.isfile(credFile):
            log.warn("Required file with credentials '%s' not found.", credFile)
            return

        testFile = ("./tests/testData/imsim_886258731_R33_S21_C12_E000.fits.gz")
        self.assertTrue(isFitsExt('stuf.fits'))
        self.assertFalse(isFitsExt('thing.txt'))
        self.assertFalse(isFitsExt('item.tx.gz'))
        self.assertTrue(isFitsExt(testFile))
        self.assertTrue(isFits(testFile))

        # Destroy existing tables and re-create them
        dbDestroyCreate(credFile, "DELETE")

        # Open a connection to the database.
        metadataFits = MetadataFitsDb(credFile)

        # test a specific file
        self.assertFalse(metadataFits.isFileInDb(testFile))
        metadataFits.insertFile(testFile)
        log.info(metadataFits.showColumnsInTables())
        self.assertTrue(metadataFits.isFileInDb(testFile))

        # test crawler
        rootDir = '~/test_md'
        rootDir = os.path.expanduser(rootDir)
        if not os.path.exists(rootDir):
            log.error("Data directory {} is required".format(rootDir))
            return
        directoryCrawl(rootDir, metadataFits)
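The assertions above pin down the contract of `isFitsExt`; the real helper ships with the project, but a hypothetical stand-in consistent with those assertions would be:

def isFitsExt(fileName):
    """Guess from the extension whether fileName is a FITS file (possibly gzipped)."""
    name = fileName.lower()
    if name.endswith('.gz'):
        name = name[:-3]  # look through one level of gzip compression
    return name.endswith('.fits')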
Example #4
    def testRedir(self):
        """
        Test redirection to stream.
        """
        with TestRedir.StdoutCapture(self.outputFilename):
            log.configure()
            dest = io.StringIO()
            log_utils.enable_notebook_logging(dest)
            log.log(log.getDefaultLogger().getName(), log.INFO, "This is INFO")
            log.info(u"This is unicode INFO")
            log.trace("This is TRACE")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log_utils.disable_notebook_logging()
            log.warn("Format %d %g %s", 3, 2.71828, "foo")
        self.assertEqual(
            dest.getvalue(),
            """root INFO: This is INFO
root INFO: This is unicode INFO
root WARN: This is WARN
root ERROR: This is ERROR
root FATAL: This is FATAL
""",
        )
        self.check(
            """
root WARN: Format 3 2.71828 foo
"""
        )
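For comparison, the same in-memory capture with nothing but the standard library (the format string is an assumption chosen to mimic the expected output above):

import io
import logging

dest = io.StringIO()
handler = logging.StreamHandler(dest)
handler.setFormatter(logging.Formatter("%(name)s %(levelname)s: %(message)s"))
logging.getLogger().addHandler(handler)
logging.warning("captured in memory")
print(dest.getvalue(), end="")  # -> "root WARNING: captured in memory"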
Example #5
    def testBasic(self):
        """
        Test basic log output with default configuration.
        Since the default threshold is INFO, the DEBUG and TRACE
        messages are not emitted.
        """
        with TestLog.StdoutCapture(self.outputFilename):
            log.configure()
            log.log(log.getDefaultLogger(), log.INFO, "This is INFO")
            log.info(u"This is unicode INFO")
            log.trace("This is TRACE")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log.critical("This is CRITICAL")
            log.warning("Format %d %g %s", 3, 2.71828, "foo")
        self.check("""
root INFO: This is INFO
root INFO: This is unicode INFO
root WARN: This is WARN
root ERROR: This is ERROR
root FATAL: This is FATAL
root FATAL: This is CRITICAL
root WARN: Format 3 2.71828 foo
""")
Example #7
    def getCcdImage(self, ccd, imageFactory=afwImage.ImageF, binSize=1, asMaskedImage=False):
        """Return an image of the specified ccd, and also the (possibly updated) ccd"""

        log = lsst.log.Log.getLogger("afw.cameraGeom.utils.ButlerImage")

        if self.isTrimmed:
            bbox = ccd.getBBox()
        else:
            bbox = calcRawCcdBBox(ccd)

        im = None
        if self.butler is not None:
            err = None
            for dataId in [dict(detector=ccd.getId()), dict(ccd=ccd.getId()), dict(ccd=ccd.getName())]:
                try:
                    im = self.butler.get(self.type, dataId, **self.kwargs)
                except FitsError as e:  # no point trying another dataId
                    err = IOError(e.args[0].split('\n')[0])  # It's a very chatty error
                    break
                except Exception as e:  # try a different dataId
                    if err is None:
                        err = e
                    continue
                else:
                    ccd = im.getDetector()  # possibly modified by assembleCcdTask
                    break

            if im:
                if asMaskedImage:
                    im = im.getMaskedImage()
                else:
                    im = im.getMaskedImage().getImage()
            else:
                if self.verbose:
                    # Lost by jupyterlab.
                    print(f"Reading {ccd.getId()}: {err}")

                log.warn(f"Reading {ccd.getId()}: {err}")

        if im is None:
            return self._prepareImage(ccd, imageFactory(*bbox.getDimensions()), binSize), ccd

        if self.type == "raw":
            if hasattr(im, 'convertF'):
                im = im.convertF()
            if False and self.callback is None:   # we need to trim the raw image
                self.callback = rawCallback

        allowRotate = True
        if self.callback:
            try:
                im = self.callback(im, ccd, imageSource=self)
            except Exception as e:
                if self.verbose:
                    log.error(f"callback failed: {e}")
                im = imageFactory(*bbox.getDimensions())
            else:
                allowRotate = False     # the callback was responsible for any rotations

        return self._prepareImage(ccd, im, binSize, allowRotate=allowRotate), ccd
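The dataId loop above is a general first-success-wins pattern; distilled into a helper (hypothetical, with `fetch` standing in for `butler.get`):

def get_first(fetch, candidates):
    """Return (result, None) for the first candidate fetch accepts,
    or (None, first_error) if every candidate fails."""
    err = None
    for candidate in candidates:
        try:
            result = fetch(candidate)
        except Exception as e:
            if err is None:
                err = e  # remember the first failure for later logging
            continue
        else:
            return result, None
    return None, err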
Example #8
def main():
    args = _Parser().parse_args()
    log = lsst.log.Log.getLogger("add_gen3_repo")

    # To convert consistently, don't use any previous output
    dataset = ap_verify.dataset.Dataset(args.dataset)
    gen3_repo = os.path.join(dataset.datasetRoot, "preloaded")
    if os.path.exists(gen3_repo):
        log.warn("Clearing out %s and making it from scratch...", gen3_repo)
        shutil.rmtree(gen3_repo)
    os.makedirs(gen3_repo)

    mode = "copy" if args.drop_gen2 else "relsymlink"

    log.info("Converting templates...")
    gen2_templates = dataset.templateLocation
    _migrate_gen2_to_gen3(dataset, gen2_templates, None, gen3_repo, mode,
                          config_file="convertRepo_templates.py")

    log.info("Converting calibs...")
    with tempfile.TemporaryDirectory() as tmp:
        workspace = ap_verify.workspace.Workspace(tmp)
        ap_verify.ingestion.ingestDataset(dataset, workspace)

        gen2_repo = workspace.dataRepo
        gen2_calibs = workspace.calibRepo
        # Files stored in the Gen 2 part of the dataset can be safely linked
        _migrate_gen2_to_gen3(dataset, gen2_repo, gen2_calibs, gen3_repo, mode,
                              config_file="convertRepo_calibs.py")
        # Our refcats and defects are temporary files, and must not be linked
        _migrate_gen2_to_gen3(dataset, gen2_repo, gen2_calibs, gen3_repo, mode="copy",
                              config_file="convertRepo_copied.py")

    log.info("Exporting Gen 3 registry to configure new repos...")
    _export_for_copy(dataset, gen3_repo)
Example #9
    def getCcdImage(self, ccd, imageFactory=afwImage.ImageF, binSize=1, asMaskedImage=False):
        """Return an image of the specified ccd, and also the (possibly updated) ccd"""

        log = lsst.log.Log.getLogger("afw.cameraGeom.utils.ButlerImage")

        if self.isTrimmed:
            bbox = ccd.getBBox()
        else:
            bbox = calcRawCcdBBox(ccd)

        im = None
        if self.butler is not None:
            err = None
            for dataId in [dict(detector=ccd.getId()), dict(ccd=ccd.getId()), dict(ccd=ccd.getName())]:
                try:
                    im = self.butler.get(self.type, dataId, **self.kwargs)
                except FitsError as e:  # no point trying another dataId
                    err = IOError(e.args[0].split('\n')[0])  # It's a very chatty error
                    break
                except Exception as e:  # try a different dataId
                    if err is None:
                        err = e
                    continue
                else:
                    ccd = im.getDetector()  # possibly modified by assembleCcdTask
                    break

            if im:
                if asMaskedImage:
                    im = im.getMaskedImage()
                else:
                    im = im.getMaskedImage().getImage()
            else:
                if self.verbose:
                    print("Reading %s: %s" % (ccd.getId(), err))  # lost by jupyterLab

                log.warn("Reading %s: %s", ccd.getId(), err)

        if im is None:
            return self._prepareImage(ccd, imageFactory(*bbox.getDimensions()), binSize), ccd

        if self.type == "raw":
            if hasattr(im, 'convertF'):
                im = im.convertF()
            if False and self.callback is None:   # we need to trim the raw image
                self.callback = rawCallback

        allowRotate = True
        if self.callback:
            try:
                im = self.callback(im, ccd, imageSource=self)
            except Exception as e:
                if self.verbose:
                    log.error("callback failed: %s" % e)
                im = imageFactory(*bbox.getDimensions())
            else:
                allowRotate = False     # the callback was responsible for any rotations

        return self._prepareImage(ccd, im, binSize, allowRotate=allowRotate), ccd
Example #10
 def validate(self):
     CoaddBaseTask.ConfigClass.validate(self)
     if not self.makePsfMatched and not self.makeDirect:
         raise RuntimeError("At least one of config.makePsfMatched and config.makeDirect must be True")
     if self.doPsfMatch:
         # Backwards compatibility.
         log.warn("Config doPsfMatch deprecated. Setting makePsfMatched=True and makeDirect=False")
         self.makePsfMatched = True
         self.makeDirect = False
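Since configs carry no logger, the module-level `log.warn` is the fallback here; with only the standard library, the same deprecation shim could be written as (a sketch, not the project's code):

import warnings

def _handleDeprecatedPsfMatch(config):
    """Translate the deprecated doPsfMatch flag onto the new options."""
    if config.doPsfMatch:
        warnings.warn("doPsfMatch is deprecated; set makePsfMatched/makeDirect instead",
                      FutureWarning, stacklevel=2)
        config.makePsfMatched = True
        config.makeDirect = False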
Example #12
    def runPipeline(self, graph, butler, args):
        """
        Parameters
        ----------
        graph : `QuantumGraph`
            Execution graph.
        butler : `Butler`
            data butler instance
        args : `argparse.Namespace`
            Parsed command line
        """

        # how many processes do we want
        numProc = args.processes

        # pre-flight check
        for taskNodes in graph:
            taskDef, quanta = taskNodes.taskDef, taskNodes.quanta
            task = self.taskFactory.makeTask(taskDef.taskClass, taskDef.config,
                                             None, butler)
            if not self.precall(task, butler, args):
                # non-zero means failure
                return 1

            if numProc > 1 and not taskDef.taskClass.canMultiprocess:
                lsstLog.warn(
                    "Task %s does not support multiprocessing; using one process",
                    taskDef.taskName)
                numProc = 1

        # choose the map function: simple sequential map or multi-process map
        if numProc > 1:
            timeout = getattr(args, 'timeout', None)
            if timeout is None or timeout <= 0:
                timeout = self.MP_TIMEOUT
            mapFunc = _MPMap(numProc, timeout)
        else:

            def _mapFunc(func, iterable):
                """Call function for all items sequentially"""
                return [func(item) for item in iterable]

            mapFunc = _mapFunc

        # tasks are executed sequentially but quanta can run in parallel
        for taskNodes in graph:
            taskDef, quanta = taskNodes.taskDef, taskNodes.quanta
            # targets for map function
            target_list = [(taskDef.taskClass, taskDef.config, quantum, butler)
                           for quantum in quanta]
            # call task on each argument in a list
            profile_name = getattr(args, "profile", None)
            with util.profile(profile_name, lsstLog):
                mapFunc(self._executeSuperTask, target_list)
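A minimal version of the sequential-versus-multiprocess choice above, using the standard multiprocessing module in place of the project's `_MPMap` helper (the timeout default is an assumption):

import multiprocessing

def makeMapFunc(numProc, timeout=3600):
    """Return a callable mapFunc(func, iterable) -> list of results."""
    if numProc > 1:
        def mpMap(func, iterable):
            # func and its arguments must be picklable for Pool workers
            with multiprocessing.Pool(numProc) as pool:
                # map_async + get() bounds the total wait time
                return pool.map_async(func, iterable).get(timeout)
        return mpMap
    return lambda func, iterable: list(map(func, iterable))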
Example #13
 def validate(self):
     pexConfig.Config.validate(self)
     if not self.makePsfMatched and not self.makeDirect:
         raise RuntimeError(
             "At least one of config.makePsfMatched and config.makeDirect must be True"
         )
     if self.doPsfMatch:
         # Courtesy backwards compatibility.
         # Configs do not have loggers
         log.warn(
             "Config doPsfMatch deprecated. Setting makePsfMatched=True and makeDirect=False"
         )
         self.makePsfMatched = True
         self.makeDirect = False
Example #14
    def testForwardToPythonAppenderWithMDC(self):
        """Test that `log4cxx` appender forwards it all to logging and modifies
        message with MDC info"""
        self.configure("""
log4j.rootLogger=DEBUG, PyLog
log4j.appender.PyLog = PyLogAppender
log4j.appender.PyLog.MessagePattern = %m (LABEL=%X{{LABEL}})
""")
        log.MDC("LABEL", "some.task")
        with self.assertLogs(level="WARNING") as cm:
            log.warn("lsst.log: forwarded")
        log.MDCRemove("LABEL")
        self.assertEqual(len(cm.records), 1)
        self.assertEqual(cm.records[0].MDC, {"LABEL": "some.task"})
        self.assertEqual(cm.records[0].msg,
                         "lsst.log: forwarded (LABEL=some.task)")
Example #15
    def makeWcs(self, visitInfo, detector):
        """Create a SkyWcs from information about the exposure.

        If VisitInfo is not None, use it and the detector to create a SkyWcs,
        otherwise return the metadata-based SkyWcs (always created, so that
        the relevant metadata keywords are stripped).

        Parameters
        ----------
        visitInfo : `~lsst.afw.image.VisitInfo`
            The information about the telescope boresight and camera
            orientation angle for this exposure.
        detector : `~lsst.afw.cameraGeom.Detector`
            The detector used to acquire this exposure.

        Returns
        -------
        skyWcs : `~lsst.afw.geom.SkyWcs`
            Reversible mapping from pixel coordinates to sky coordinates.

        Raises
        ------
        InitialSkyWcsError
            Raised if there is an error generating the SkyWcs, chained from the
            lower-level exception if available.
        """
        if not self.isOnSky():
            # This is not an on-sky observation
            return None

        skyWcs = self._createSkyWcsFromMetadata()

        log = lsst.log.Log.getLogger("fitsRawFormatter")
        if visitInfo is None:
            msg = "No VisitInfo; cannot access boresight information. Defaulting to metadata-based SkyWcs."
            log.warn(msg)
            if skyWcs is None:
                raise InitialSkyWcsError(
                    "Failed to create both metadata and boresight-based SkyWcs."
                    "See warnings in log messages for details.")
            return skyWcs

        return self.makeRawSkyWcsFromBoresight(
            visitInfo.getBoresightRaDec(), visitInfo.getBoresightRotAngle(),
            detector)
Example #16
    def testForwardToPythonAppender(self):
        """Test that `log4cxx` appender forwards it all to logging"""
        self.configure("""
log4j.rootLogger=DEBUG, PyLog
log4j.appender.PyLog = PyLogAppender
""")
        with self.assertLogs(level="WARNING") as cm:
            log.warn("lsst.log: forwarded")
            logging.warning("Python logging: also captured")
        self.assertEqual(len(cm.output), 2)

        # check that MDC is stored in LogRecord
        log.MDC("LABEL", "some.task")
        with self.assertLogs(level="WARNING") as cm:
            log.warn("lsst.log: forwarded")
        log.MDCRemove("LABEL")
        self.assertEqual(len(cm.records), 1)
        self.assertEqual(cm.records[0].MDC, {"LABEL": "some.task"})
        self.assertEqual(cm.records[0].msg, "lsst.log: forwarded")
Example #17
    def _createSkyWcsFromMetadata(self):
        """Create a SkyWcs from the FITS header metadata in an Exposure.

        Returns
        -------
        skyWcs: `lsst.afw.geom.SkyWcs`, or None
            The WCS that was created from ``self.metadata``, or None if that
            creation fails due to invalid metadata.
        """
        if not self.isOnSky():
            # This is not an on-sky observation
            return None

        try:
            return lsst.afw.geom.makeSkyWcs(self.metadata, strip=True)
        except TypeError as e:
            log = lsst.log.Log.getLogger("fitsRawFormatter")
            log.warn("Cannot create a valid WCS from metadata: %s", e.args[0])
            return None
Example #18
    def testForwardToPythonContextManager(self):
        """Test that `lsst.log` log messages can be forwarded to `logging`
        using a context manager"""
        log.configure()

        # Without forwarding we only get python logger messages captured
        with self.assertLogs(level="WARNING") as cm:
            log.warning("lsst.log: not forwarded")
            logging.warning("Python logging: captured")
        self.assertEqual(len(cm.output), 1)

        # Temporarily turn on forwarding
        with log.UsePythonLogging():
            with self.assertLogs(level="WARNING") as cm:
                log.warn("lsst.log: forwarded")
                logging.warning("Python logging: also captured")
            self.assertEqual(len(cm.output), 2)

        # Verify that forwarding is disabled
        self.assertFalse(log.Log.UsePythonLogging)
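The same scoped forwarding outside a test, as a sketch:

import lsst.log as log

log.configure()
with log.UsePythonLogging():    # forwarding is active only inside this block
    log.warn("delivered through the Python logging module")
log.warn("back on the log4cxx appenders")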
Example #20
    def testBasic(self):
        """
        Test basic log output.  Since the default threshold is INFO, the
        TRACE message is not emitted.
        """
        with TestLog.StdoutCapture(self.outputFilename):
            log.configure()
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log.info("Format %d %g %s", 3, 2.71828, "foo")
        self.check("""
 INFO root null - This is INFO
 DEBUG root null - This is DEBUG
 WARN root null - This is WARN
 ERROR root null - This is ERROR
 FATAL root null - This is FATAL
 INFO root null - Format 3 2.71828 foo
""")
Example #21
def main():
    args = _Parser().parse_args()
    log = lsst.log.Log.getLogger("add_gen3_repo")

    # To convert consistently, don't use any previous output
    dataset = ap_verify.dataset.Dataset("test")
    gen3_repo = os.path.join(dataset.datasetRoot, "preloaded")
    if os.path.exists(gen3_repo):
        log.warn("Clearing out %s and making it from scratch...", gen3_repo)
        shutil.rmtree(gen3_repo)
    os.makedirs(gen3_repo)

    mode = "copy" if args.drop_gen2 else "relsymlink"

    log.info("Converting calibs...")
    with tempfile.TemporaryDirectory() as tmp:
        workspace = ap_verify.workspace.WorkspaceGen2(tmp)
        ap_verify.ingestion.ingestDataset(dataset, workspace)

        gen2_repo = workspace.dataRepo
        gen2_calibs = workspace.calibRepo
        # Files stored in the Gen 2 part of the dataset can be safely linked
        _migrate_gen2_to_gen3(dataset, gen2_repo, gen2_calibs, gen3_repo, mode,
                              curated=True,
                              config_file="convertRepo_calibs.py")
        # Our refcats and defects are temporary files, and must not be linked
        _migrate_gen2_to_gen3(dataset, gen2_repo, gen2_calibs, gen3_repo, mode="copy",
                              curated=False,
                              config_file="convertRepo_copied.py")

    # ap_verify assumes specific collections are present
    log.info("Adding unpopulated collections...")
    butler = daf_butler.Butler(gen3_repo, writeable=True)
    butler.registry.registerCollection("skymaps", daf_butler.CollectionType.RUN)
    butler.registry.registerCollection("templates/deep", daf_butler.CollectionType.RUN)

    log.info("Exporting Gen 3 registry to configure new repos...")
    _export_for_copy(dataset, gen3_repo)
Example #22
    def testBasic(self):
        """
        Test basic log output with default configuration.
        Since the default threshold is INFO, the DEBUG and TRACE
        messages are not emitted.
        """
        with TestLog.StdoutCapture(self.outputFilename):
            log.configure()
            log.log(log.getDefaultLoggerName(), log.INFO, "This is INFO")
            log.info(u"This is unicode INFO")
            log.trace("This is TRACE")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log.warn("Format %d %g %s", 3, 2.71828, "foo")
        self.check("""
root INFO: This is INFO
root INFO: This is unicode INFO
root WARN: This is WARN
root ERROR: This is ERROR
root FATAL: This is FATAL
root WARN: Format 3 2.71828 foo
""")
Example #23
    def testForwardToPythonAppenderFormatMDC(self):
        """Test that we can format `log4cxx` MDC on Python side"""

        # remember old record factory
        old_factory = logging.getLogRecordFactory()

        # configure things using convenience method
        log.configure_pylog_MDC("INFO")

        with self.assertLogs(level="WARNING") as cm:
            log.warn("lsst.log: forwarded 1")
            log.MDC("LABEL", "task1")
            log.warn("lsst.log: forwarded 2")
            log.MDC("LABEL-X", "task2")
            log.warn("lsst.log: forwarded 3")
            logging.warning("Python logging: also captured")
            log.MDCRemove("LABEL")
            log.MDCRemove("LABEL-X")
        self.assertEqual(len(cm.records), 4)

        # restore factory
        logging.setLogRecordFactory(old_factory)

        # %-style formatting, only works on whole MDC
        formatter = logging.Formatter(fmt="%(levelname)s:%(name)s:%(message)s",
                                      style="%")
        self.assertEqual(formatter.format(cm.records[0]),
                         "WARNING:root:lsst.log: forwarded 1")
        formatter = logging.Formatter(
            fmt="%(levelname)s:%(name)s:%(message)s:%(MDC)s", style="%")
        self.assertEqual(formatter.format(cm.records[0]),
                         "WARNING:root:lsst.log: forwarded 1:{}")
        self.assertEqual(formatter.format(cm.records[1]),
                         "WARNING:root:lsst.log: forwarded 2:{LABEL=task1}")
        self.assertEqual(
            formatter.format(cm.records[2]),
            "WARNING:root:lsst.log: forwarded 3:{LABEL=task1, LABEL-X=task2}")
        self.assertEqual(formatter.format(cm.records[3]),
                         "WARNING:root:Python logging: also captured:{}")

        # format-style formatting, without MDC first
        formatter = logging.Formatter(fmt="{levelname}:{name}:{message}",
                                      style="{")
        self.assertEqual(formatter.format(cm.records[0]),
                         "WARNING:root:lsst.log: forwarded 1")

        # format-style formatting, with full MDC
        formatter = logging.Formatter(fmt="{levelname}:{name}:{message}:{MDC}",
                                      style="{")
        self.assertEqual(formatter.format(cm.records[0]),
                         "WARNING:root:lsst.log: forwarded 1:{}")
        self.assertEqual(formatter.format(cm.records[1]),
                         "WARNING:root:lsst.log: forwarded 2:{LABEL=task1}")
        self.assertEqual(
            formatter.format(cm.records[2]),
            "WARNING:root:lsst.log: forwarded 3:{LABEL=task1, LABEL-X=task2}")
        self.assertEqual(formatter.format(cm.records[3]),
                         "WARNING:root:Python logging: also captured:{}")

        # format-style, using index access to MDC items, works for almost any
        # item names
        formatter = logging.Formatter(
            fmt="{levelname}:{name}:{message}:{MDC[LABEL-X]}", style="{")
        self.assertEqual(formatter.format(cm.records[0]),
                         "WARNING:root:lsst.log: forwarded 1:")
        self.assertEqual(formatter.format(cm.records[1]),
                         "WARNING:root:lsst.log: forwarded 2:")
        self.assertEqual(formatter.format(cm.records[2]),
                         "WARNING:root:lsst.log: forwarded 3:task2")
        self.assertEqual(formatter.format(cm.records[3]),
                         "WARNING:root:Python logging: also captured:")
Example #24
def plotPsfSpatialModel(exposure, psf, psfCellSet, showBadCandidates=True, numSample=128,
                        matchKernelAmplitudes=False, keepPlots=True):
    """Plot the PSF spatial model."""

    log = lsst.log.Log.getLogger("utils.plotPsfSpatialModel")
    try:
        import matplotlib.pyplot as plt
        import matplotlib as mpl
    except ImportError as e:
        log.warn("Unable to import matplotlib: %s", e)
        return

    noSpatialKernel = psf.getKernel()
    candPos = list()
    candFits = list()
    badPos = list()
    badFits = list()
    candAmps = list()
    badAmps = list()
    for cell in psfCellSet.getCellList():
        for cand in cell.begin(False):
            if not showBadCandidates and cand.isBad():
                continue
            candCenter = lsst.geom.PointD(cand.getXCenter(), cand.getYCenter())
            try:
                im = cand.getMaskedImage()
            except Exception:
                continue

            fit = fitKernelParamsToImage(noSpatialKernel, im, candCenter)
            params = fit[0]
            kernels = fit[1]
            amp = 0.0
            for p, k in zip(params, kernels):
                amp += p * k.getSum()

            targetFits = badFits if cand.isBad() else candFits
            targetPos = badPos if cand.isBad() else candPos
            targetAmps = badAmps if cand.isBad() else candAmps

            targetFits.append([x / amp for x in params])
            targetPos.append(candCenter)
            targetAmps.append(amp)

    xGood = numpy.array([pos.getX() for pos in candPos]) - exposure.getX0()
    yGood = numpy.array([pos.getY() for pos in candPos]) - exposure.getY0()
    zGood = numpy.array(candFits)

    xBad = numpy.array([pos.getX() for pos in badPos]) - exposure.getX0()
    yBad = numpy.array([pos.getY() for pos in badPos]) - exposure.getY0()
    zBad = numpy.array(badFits)
    numBad = len(badPos)

    xRange = numpy.linspace(0, exposure.getWidth(), num=numSample)
    yRange = numpy.linspace(0, exposure.getHeight(), num=numSample)

    kernel = psf.getKernel()
    nKernelComponents = kernel.getNKernelParameters()
    #
    # Figure out how many panels we'll need
    #
    nPanelX = int(math.sqrt(nKernelComponents))
    nPanelY = nKernelComponents//nPanelX
    while nPanelY*nPanelX < nKernelComponents:
        nPanelX += 1

    fig = plt.figure(1)
    fig.clf()
    try:
        fig.canvas._tkcanvas._root().lift()  # == Tk's raise, but raise is a python reserved word
    except Exception:                                  # protect against API changes
        pass
    #
    # Generator for axes arranged in panels
    #
    mpl.rcParams["figure.titlesize"] = "x-small"
    subplots = makeSubplots(fig, 2, 2, Nx=nPanelX, Ny=nPanelY, xgutter=0.06, ygutter=0.06, pygutter=0.04)

    for k in range(nKernelComponents):
        func = kernel.getSpatialFunction(k)
        dfGood = zGood[:, k] - numpy.array([func(pos.getX(), pos.getY()) for pos in candPos])
        yMin = dfGood.min()
        yMax = dfGood.max()
        if numBad > 0:
            dfBad = zBad[:, k] - numpy.array([func(pos.getX(), pos.getY()) for pos in badPos])
            yMin = min([yMin, dfBad.min()])
            yMax = max([yMax, dfBad.max()])
        yMin -= 0.05 * (yMax - yMin)
        yMax += 0.05 * (yMax - yMin)

        yMin = -0.01
        yMax = 0.01

        fRange = numpy.ndarray((len(xRange), len(yRange)))
        for j, yVal in enumerate(yRange):
            for i, xVal in enumerate(xRange):
                fRange[j][i] = func(xVal, yVal)

        ax = next(subplots)

        ax.set_autoscale_on(False)
        ax.set_xbound(lower=0, upper=exposure.getHeight())
        ax.set_ybound(lower=yMin, upper=yMax)
        ax.plot(yGood, dfGood, 'b+')
        if numBad > 0:
            ax.plot(yBad, dfBad, 'r+')
        ax.axhline(0.0)
        ax.set_title('Residuals(y)')

        ax = next(subplots)

        if matchKernelAmplitudes and k == 0:
            vmin = 0.0
            vmax = 1.1
        else:
            vmin = fRange.min()
            vmax = fRange.max()

        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        im = ax.imshow(fRange, aspect='auto', origin="lower", norm=norm,
                       extent=[0, exposure.getWidth()-1, 0, exposure.getHeight()-1])
        ax.set_title('Spatial poly')
        plt.colorbar(im, orientation='horizontal', ticks=[vmin, vmax])

        ax = next(subplots)
        ax.set_autoscale_on(False)
        ax.set_xbound(lower=0, upper=exposure.getWidth())
        ax.set_ybound(lower=yMin, upper=yMax)
        ax.plot(xGood, dfGood, 'b+')
        if numBad > 0:
            ax.plot(xBad, dfBad, 'r+')
        ax.axhline(0.0)
        ax.set_title('K%d Residuals(x)' % k)

        ax = next(subplots)

        photoCalib = exposure.getPhotoCalib()
        # If there is no calibration factor, use 1.0.
        if photoCalib.getCalibrationMean() <= 0:
            photoCalib = afwImage.PhotoCalib(1.0)

        ampMag = [photoCalib.instFluxToMagnitude(candAmp) for candAmp in candAmps]
        ax.plot(ampMag, zGood[:, k], 'b+')
        if numBad > 0:
            badAmpMag = [photoCalib.instFluxToMagnitude(badAmp) for badAmp in badAmps]
            ax.plot(badAmpMag, zBad[:, k], 'r+')

        ax.set_title('Flux variation')

    fig.show()

    global keptPlots
    if keepPlots and not keptPlots:
        # Keep plots open when done
        def show():
            print("%s: Please close plots when done." % __name__)
            try:
                plt.show()
            except Exception:
                pass
            print("Plots closed, exiting...")
        import atexit
        atexit.register(show)
        keptPlots = True
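The keep-plots-open trick at the end generalizes to any matplotlib script; a standalone sketch:

import atexit

def _holdPlots():
    import matplotlib.pyplot as plt
    print("Please close plots when done.")
    try:
        plt.show()   # blocks until every open figure window is closed
    except Exception:
        pass

atexit.register(_holdPlots)   # runs at normal interpreter exit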
Example #25
def makeSubplots(fig, nx=2, ny=2, Nx=1, Ny=1, plottingArea=(0.1, 0.1, 0.85, 0.80),
                 pxgutter=0.05, pygutter=0.05, xgutter=0.04, ygutter=0.04,
                 headroom=0.0, panelBorderWeight=0, panelColor='black'):
    """Return a generator of a set of subplots, a set of Nx*Ny panels of nx*ny plots.  Each panel is fully
    filled by row (starting in the bottom left) before the next panel is started.  If panelBorderWidth is
    greater than zero a border is drawn around each panel, adjusted to enclose the axis labels.

    E.g.
    subplots = makeSubplots(fig, 2, 2, Nx=1, Ny=1, panelColor='k')
    ax = subplots.next(); ax.text(0.3, 0.5, '[0, 0] (0,0)')
    ax = subplots.next(); ax.text(0.3, 0.5, '[0, 0] (1,0)')
    ax = subplots.next(); ax.text(0.3, 0.5, '[0, 0] (0,1)')
    ax = subplots.next(); ax.text(0.3, 0.5, '[0, 0] (1,1)')
    fig.show()

    @param fig    The matplotlib figure to draw
    @param nx     The number of plots in each row of each panel
    @param ny     The number of plots in each column of each panel
    @param Nx     The number of panels in each row of the figure
    @param Ny     The number of panels in each column of the figure
    @param plottingArea  (x0, y0, x1, y1) for the part of the figure containing all the panels
    @param pxgutter Spacing between columns of panels in units of (x1 - x0)
    @param pygutter Spacing between rows of panels in units of (y1 - y0)
    @param xgutter  Spacing between columns of plots within a panel in units of (x1 - x0)
    @param ygutter  Spacing between rows of plots within a panel in units of (y1 - y0)
    @param headroom Extra spacing above each plot for e.g. a title
    @param panelBorderWeight Width of border drawn around panels
    @param panelColor Colour of border around panels
    """

    log = lsst.log.Log.getLogger("utils.makeSubplots")
    try:
        import matplotlib.pyplot as plt
    except ImportError as e:
        log.warn("Unable to import matplotlib: %s", e)
        return

    # Make show() call canvas.draw() too so that we know how large the axis labels are.  Sigh
    try:
        fig.__show
    except AttributeError:
        fig.__show = fig.show

        def myShow(fig):
            fig.__show()
            fig.canvas.draw()

        import types
        fig.show = types.MethodType(myShow, fig)
    #
    # We can't get the axis sizes until after draw()'s been called, so use a callback  Sigh^2
    #
    axes = {}                           # all axes in all the panels we're drawing: axes[panel][0] etc.
    #

    def on_draw(event):
        """
        Callback to draw the panel borders when the plots are drawn to the canvas
        """
        if panelBorderWeight <= 0:
            return False

        for p in axes.keys():
            bboxes = []
            for ax in axes[p]:
                bboxes.append(ax.bbox.union([label.get_window_extent() for label in
                                             ax.get_xticklabels() + ax.get_yticklabels()]))

            ax = axes[p][0]

            # this is the bbox that bounds all the bboxes, again in relative
            # figure coords

            bbox = ax.bbox.union(bboxes)

            xy0, xy1 = ax.transData.inverted().transform(bbox)
            x0, y0 = xy0
            x1, y1 = xy1
            w, h = x1 - x0, y1 - y0
            # allow a little space around BBox
            x0 -= 0.02*w
            w += 0.04*w
            y0 -= 0.02*h
            h += 0.04*h
            h += h*headroom
            # draw BBox
            ax.patches = []             # remove old ones
            rec = ax.add_patch(plt.Rectangle((x0, y0), w, h, fill=False,
                                             lw=panelBorderWeight, edgecolor=panelColor))
            rec.set_clip_on(False)

        return False

    fig.canvas.mpl_connect('draw_event', on_draw)
    #
    # Choose the plotting areas for each subplot
    #
    x0, y0 = plottingArea[0:2]
    W, H = plottingArea[2:4]
    w = (W - (Nx - 1)*pxgutter - (nx*Nx - 1)*xgutter)/float(nx*Nx)
    h = (H - (Ny - 1)*pygutter - (ny*Ny - 1)*ygutter)/float(ny*Ny)
    #
    # OK!  Time to create the subplots
    #
    for panel in range(Nx*Ny):
        axes[panel] = []
        px = panel%Nx
        py = panel//Nx
        for window in range(nx*ny):
            x = nx*px + window%nx
            y = ny*py + window//nx
            ax = fig.add_axes((x0 + xgutter + pxgutter + x*w + (px - 1)*pxgutter + (x - 1)*xgutter,
                               y0 + ygutter + pygutter + y*h + (py - 1)*pygutter + (y - 1)*ygutter,
                               w, h), frame_on=True, facecolor='w')
            axes[panel].append(ax)
            yield ax
Example #26
    def doSuperTask(self, args, extra_args):
        """Implementation of run/show for SuperTask.

        Parameters
        ----------
        args : `argparse.Namespace`
            Parsed command line
        extra_args : `list` of `str`
            extra arguments for sub-command which were not parsed by parser
        """

        if args.do_help:
            # before displaying help populate sub-parser with task-specific options
            self._copyParserOptions(self.task_class.makeArgumentParser(),
                                    args.subparser)
            args.subparser.print_help()
            return

        if args.subcommand not in ("run", "show"):
            print("unexpected command {}".format(args.subcommand),
                  file=sys.stderr)
            return 2

        # parse remaining extra options
        task_args = self._reParseArgs(args, extra_args)

        # do all --show first
        # currently task parser handles that, we have to implement something
        # similar when we get rid of that

        if args.subcommand == "show":
            # stop here
            return

        # make task instance
        task = self._makeSuperTask(task_args.butler)

        # how many processes do we want
        numProc = task_args.processes
        if numProc > 1 and not self.task_class.canMultiprocess:
            lsstLog.warn(
                "This task does not support multiprocessing; using one process"
            )
            numProc = 1

        # execute it
        if self.precall(task, task_args):

            # choose the map function: simple sequential map or multi-process map
            if numProc > 1:
                timeout = getattr(task_args, 'timeout', None)
                if timeout is None or timeout <= 0:
                    timeout = self.MP_TIMEOUT
                mapFunc = _MPMap(numProc, timeout)
            else:
                # map in Py3 returns iterable and we want a complete result
                mapFunc = lambda func, iterable: list(map(func, iterable))

            log = task_args.log
            target_list = self._makeTargetList(task_args)
            if target_list:
                # call task on each argument in a list
                profile_name = getattr(task_args, "profile", None)
                with profile(profile_name, log):
                    mapFunc(self._executeSuperTask, target_list)
            else:
                log.warn(
                    "Not running the task because there is no data to process; "
                    "you may preview data using \"--show-data\"")
Example #27
    def run(self):
        credFileName = "~/.lsst/dbAuth-dbServ.ini"
        engine = getEngineFromFile(credFileName)
        dbName = "{}_fitsTest".format(engine.url.username)
        metaDb = MetadataFitsDb(credFileName)

        resp = None
        try:
            resp = self.client.search(WATCH_FOLDER, version="current", site="all",
                                      query="scanStatus = 'UNSCANNED'", max_num=1000)
        except DcException as error:
            if hasattr(error, "message"):
                log.warn("Error occurred:\nMessage: %s", error.message)
                if hasattr(error, "type"):
                    log.warn("Type: %s", error.type)
                if hasattr(error, "cause"):
                    log.warn("Cause: %s", error.cause)
            else:
                # Should have content
                log.warn(error.content)
            sys.exit(1)

        results = unpack(resp.content)

        for dataset in results:
            locations = dataset.locations
            check_location = None
            for location in locations:
                if location.site == WATCH_SITE:
                    check_location = location
                    break
            file_path = check_location.resource
            dataset_path = dataset.path
            stat = os.stat(file_path)
            cksum = self.get_cksum(file_path)

            # Note: While there may only be one version of a dataset,
            # we tie the metadata to versionMetadata
            scan_result = {}
            scan_result["size"] = stat.st_size
            scan_result["checksum"] = str(cksum)
            # UTC datetime in ISO format (Note: We need Z to denote UTC Time Zone)
            scan_result["locationScanned"] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            scan_result["scanStatus"] = "OK"

            md = self.get_metadata(file_path)
            if md:
                scan_result["versionMetadata"] = md

            try:
                log.debug("patch_resp %s", str(file_path))
                patch_resp = self.client.patch_dataset(dataset_path, scan_result,
                                                       versionId=dataset.versionId, site=WATCH_SITE)
                log.debug("Inserting %s", str(file_path))
                fileId = metaDb.insertFile(file_path)
                metadata = {"fileId": fileId}
                md_patch = {}
                md_patch["versionMetadata"] = metadata
                md_patch_resp = self.client.patch_dataset(dataset_path, md_patch,
                                                          versionId=dataset.versionId)
                log.info("Inserted %d %s", fileId, str(file_path))
            except DcException as err:
                log.warn("Encountered error while updating dataset %s", str(file_path), err)
Example #29
def runOneFilter(repo,
                 visitDataIds,
                 brightSnrMin=None,
                 brightSnrMax=None,
                 makeJson=True,
                 filterName=None,
                 outputPrefix='',
                 doApplyExternalPhotoCalib=False,
                 externalPhotoCalibName=None,
                 doApplyExternalSkyWcs=False,
                 externalSkyWcsName=None,
                 skipTEx=False,
                 verbose=False,
                 metrics_package='verify_metrics',
                 instrument='Unknown',
                 dataset_repo_url='./',
                 skipNonSrd=False,
                 **kwargs):
    r"""Main executable for the case where there is just one filter.

    Plot files and JSON files are generated in the local directory
    prefixed with the repository name (where '_' replaces each path separator),
    unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one.
    visitDataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration
        unless doApplyExternalPhotoCalib is True, in which case the
        appropriate `photoCalib` dataset is used instead. Note that these
        have data IDs that include the tract number.
    brightSnrMin : float, optional
        Minimum median SNR for a source to be considered bright; passed to
        `lsst.validate.drp.matchreduce.build_matched_dataset`.
    brightSnrMax : float, optional
        Maximum median SNR for a source to be considered bright; passed to
        `lsst.validate.drp.matchreduce.build_matched_dataset`.
    makeJson : bool, optional
        Create JSON output file for metrics.  Saved to current working directory.
    outputPrefix : str, optional
        Specify the beginning filename for output files.
    filterName : str, optional
        Name of the filter (bandpass).
    doApplyExternalPhotoCalib : bool, optional
        Apply external photoCalib to calibrate fluxes.
    externalPhotoCalibName : str, optional
        Type of external `PhotoCalib` to apply.  Currently supported are jointcal,
        fgcm, and fgcm_tract.  Must be set if doApplyExternalPhotoCalib is True.
    doApplyExternalSkyWcs : bool, optional
        Apply external wcs to calibrate positions.
    externalSkyWcsName : str, optional
        Type of external `wcs` to apply.  Currently supported is jointcal.
        Must be set if "doApplyExternalSkyWcs" is True.
    skipTEx : bool, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    verbose : bool, optional
        Output additional information on the analysis steps.
    skipNonSrd : bool, optional
        Skip any metrics not defined in the LSST SRD.

    Raises
    ------
    RuntimeError:
        Raised if "doApplyExternalPhotoCalib" is True and "externalPhotoCalibName"
        is None, or if "doApplyExternalSkyWcs" is True and "externalSkyWcsName" is
        None.
    """

    if kwargs:
        log.warn(
            f"Extra kwargs {kwargs} will be ignored. Did you add extra things to your config file?"
        )

    if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
        raise RuntimeError(
            "Must set externalPhotoCalibName if doApplyExternalPhotoCalib is True."
        )
    if doApplyExternalSkyWcs and externalSkyWcsName is None:
        raise RuntimeError(
            "Must set externalSkyWcsName if doApplyExternalSkyWcs is True.")

    # collect just the common key, value pairs to omit the keys that are aggregated over
    job_metadata = dict(
        set.intersection(*[set(vid.items()) for vid in visitDataIds]))

    # update with metadata passed into the method
    job_metadata.update({
        'instrument': instrument,
        'filter_name': filterName,
        'dataset_repo_url': dataset_repo_url
    })

    job = Job.load_metrics_package(meta=job_metadata,
                                   subset='validate_drp',
                                   package_name_or_path=metrics_package)

    matchedDataset = build_matched_dataset(
        repo,
        visitDataIds,
        doApplyExternalPhotoCalib=doApplyExternalPhotoCalib,
        externalPhotoCalibName=externalPhotoCalibName,
        doApplyExternalSkyWcs=doApplyExternalSkyWcs,
        externalSkyWcsName=externalSkyWcsName,
        skipTEx=skipTEx,
        skipNonSrd=skipNonSrd,
        brightSnrMin=brightSnrMin,
        brightSnrMax=brightSnrMax)

    snr = matchedDataset['snr'].quantity
    bright = (matchedDataset['brightSnrMin'].quantity <
              snr) & (snr < matchedDataset['brightSnrMax'].quantity)
    photomModel = build_photometric_error_model(matchedDataset, bright)
    astromModel = build_astrometric_error_model(matchedDataset, bright)

    linkedBlobs = [matchedDataset, photomModel, astromModel]

    metrics = job.metrics
    specs = job.specs

    def add_measurement(measurement):
        for blob in linkedBlobs:
            measurement.link_blob(blob)
        job.measurements.insert(measurement)

    for x, D in zip((1, 2, 3), (5., 20., 200.)):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        adxName = 'AD{0:d}'.format(x)

        amx = measureAMx(metrics['validate_drp.' + amxName],
                         matchedDataset,
                         D * u.arcmin,
                         verbose=verbose)
        add_measurement(amx)

        afx_spec_set = specs.subset(required_meta={'instrument': 'HSC'},
                                    spec_tags=[
                                        afxName,
                                    ])
        adx_spec_set = specs.subset(required_meta={'instrument': 'HSC'},
                                    spec_tags=[
                                        adxName,
                                    ])
        for afx_spec_key, adx_spec_key in zip(afx_spec_set, adx_spec_set):
            afx_spec = afx_spec_set[afx_spec_key]
            adx_spec = adx_spec_set[adx_spec_key]
            adx = measureADx(metrics[adx_spec.metric_name], amx, afx_spec)
            add_measurement(adx)
            afx = measureAFx(metrics[afx_spec.metric_name], amx, adx, adx_spec)
            add_measurement(afx)

    pa1 = measurePA1(metrics['validate_drp.PA1'], filterName,
                     matchedDataset.matchesBright, matchedDataset.magKey)
    add_measurement(pa1)

    pf1_spec_set = specs.subset(required_meta={'instrument': instrument,
                                               'filter_name': filterName},
                                spec_tags=['PF1'])
    pa2_spec_set = specs.subset(required_meta={'instrument': instrument,
                                               'filter_name': filterName},
                                spec_tags=['PA2'])
    # The iteration order of the two subsets is not guaranteed to match, so
    # sort both key lists to pair each PF1 spec with its PA2 counterpart.
    pf1_spec_keys = sorted(pf1_spec_set.keys())
    pa2_spec_keys = sorted(pa2_spec_set.keys())
    for pf1_spec_key, pa2_spec_key in zip(pf1_spec_keys, pa2_spec_keys):
        pf1_spec = pf1_spec_set[pf1_spec_key]
        pa2_spec = pa2_spec_set[pa2_spec_key]

        pa2 = measurePA2(metrics[pa2_spec.metric_name], pa1,
                         pf1_spec.threshold)
        add_measurement(pa2)

        pf1 = measurePF1(metrics[pf1_spec.metric_name], pa1, pa2_spec)
        add_measurement(pf1)

    if not skipTEx:
        # TE1/TE2: residual PSF ellipticity correlations averaged over scales
        # <= 1 arcmin and >= 5 arcmin, respectively.
        for x, D, bin_range_operator in zip((1, 2), (1.0, 5.0), ("<=", ">=")):
            texName = 'TE{0:d}'.format(x)
            tex = measureTEx(metrics['validate_drp.' + texName],
                             matchedDataset,
                             D * u.arcmin,
                             bin_range_operator,
                             verbose=verbose)
            add_measurement(tex)

    if not skipNonSrd:
        model_phot_reps = measure_model_phot_rep(metrics, filterName,
                                                 matchedDataset)
        for measurement in model_phot_reps:
            add_measurement(measurement)

    if makeJson:
        job.write(outputPrefix + '.json')

    return job
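A minimal sketch of reloading and inspecting the job written above, assuming
the lsst.verify Job API ('my_output.json' is a hypothetical file name standing
in for outputPrefix + '.json'):

import json

from lsst.verify import Job

# Reload the serialized job and print every metric measurement it holds.
with open('my_output.json') as fp:  # hypothetical file name
    job = Job.deserialize(**json.load(fp))
for name, measurement in job.measurements.items():
    print(name, measurement.quantity)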
Example #30
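# Module-level dependencies assumed by this snippet; the import paths follow
# lsst.meas.algorithms.utils, where this function lives, and may need
# adjusting for other layouts.
import math

import numpy

import lsst.geom
import lsst.log
import lsst.afw.image as afwImage
from lsst.meas.algorithms import fitKernelParamsToImage
# makeSubplots is a helper defined alongside this function; a stand-in
# sketch is given after the snippet.
keptPlots = False  # module-level flag consulted by the keepPlots handling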
def plotPsfSpatialModel(exposure, psf, psfCellSet, showBadCandidates=True, numSample=128,
                        matchKernelAmplitudes=False, keepPlots=True):
    """Plot the PSF spatial model."""

    log = lsst.log.Log.getLogger("utils.plotPsfSpatialModel")
    try:
        import matplotlib.pyplot as plt
        import matplotlib as mpl
    except ImportError as e:
        log.warn("Unable to import matplotlib: %s", e)
        return

    noSpatialKernel = psf.getKernel()
    candPos = []
    candFits = []
    badPos = []
    badFits = []
    candAmps = []
    badAmps = []
    for cell in psfCellSet.getCellList():
        for cand in cell.begin(False):
            if not showBadCandidates and cand.isBad():
                continue
            candCenter = lsst.geom.PointD(cand.getXCenter(), cand.getYCenter())
            try:
                im = cand.getMaskedImage()
            except Exception:
                continue

            params, kernels = fitKernelParamsToImage(noSpatialKernel, im, candCenter)
            amp = sum(p * k.getSum() for p, k in zip(params, kernels))

            targetFits = badFits if cand.isBad() else candFits
            targetPos = badPos if cand.isBad() else candPos
            targetAmps = badAmps if cand.isBad() else candAmps

            targetFits.append([x / amp for x in params])
            targetPos.append(candCenter)
            targetAmps.append(amp)

    xGood = numpy.array([pos.getX() for pos in candPos]) - exposure.getX0()
    yGood = numpy.array([pos.getY() for pos in candPos]) - exposure.getY0()
    zGood = numpy.array(candFits)

    xBad = numpy.array([pos.getX() for pos in badPos]) - exposure.getX0()
    yBad = numpy.array([pos.getY() for pos in badPos]) - exposure.getY0()
    zBad = numpy.array(badFits)
    numBad = len(badPos)

    xRange = numpy.linspace(0, exposure.getWidth(), num=numSample)
    yRange = numpy.linspace(0, exposure.getHeight(), num=numSample)

    kernel = psf.getKernel()
    nKernelComponents = kernel.getNKernelParameters()
    #
    # Figure out how many panels we'll need
    #
    nPanelX = int(math.sqrt(nKernelComponents))
    nPanelY = nKernelComponents//nPanelX
    while nPanelY*nPanelX < nKernelComponents:
        nPanelX += 1

    fig = plt.figure(1)
    fig.clf()
    try:
        fig.canvas._tkcanvas._root().lift()  # == Tk's raise, but raise is a python reserved word
    except Exception:                                  # protect against API changes
        pass
    #
    # Generator for axes arranged in panels
    #
    mpl.rcParams["figure.titlesize"] = "x-small"
    subplots = makeSubplots(fig, 2, 2, Nx=nPanelX, Ny=nPanelY, xgutter=0.06, ygutter=0.06, pygutter=0.04)
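    # Each kernel component gets a 2x2 block of panels: residuals vs y, the
    # fitted spatial polynomial, residuals vs x, and coefficient vs magnitude.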

    for k in range(nKernelComponents):
        func = kernel.getSpatialFunction(k)
        dfGood = zGood[:, k] - numpy.array([func(pos.getX(), pos.getY()) for pos in candPos])
        yMin = dfGood.min()
        yMax = dfGood.max()
        if numBad > 0:
            dfBad = zBad[:, k] - numpy.array([func(pos.getX(), pos.getY()) for pos in badPos])
            yMin = min([yMin, dfBad.min()])
            yMax = max([yMax, dfBad.max()])
        yMin -= 0.05 * (yMax - yMin)
        yMax += 0.05 * (yMax - yMin)

        # NOTE: the computed span above is immediately overridden with a
        # fixed +/-0.01 residual window; remove these two lines to autoscale.
        yMin = -0.01
        yMax = 0.01

        fRange = numpy.ndarray((len(xRange), len(yRange)))
        for j, yVal in enumerate(yRange):
            for i, xVal in enumerate(xRange):
                fRange[j][i] = func(xVal, yVal)

        ax = next(subplots)

        ax.set_autoscale_on(False)
        # Residuals are plotted against y position, so the x axis of this
        # panel spans the image height.
        ax.set_xbound(lower=0, upper=exposure.getHeight())
        ax.set_ybound(lower=yMin, upper=yMax)
        ax.plot(yGood, dfGood, 'b+')
        if numBad > 0:
            ax.plot(yBad, dfBad, 'r+')
        ax.axhline(0.0)
        ax.set_title('Residuals(y)')

        ax = next(subplots)

        if matchKernelAmplitudes and k == 0:
            vmin = 0.0
            vmax = 1.1
        else:
            vmin = fRange.min()
            vmax = fRange.max()

        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
        im = ax.imshow(fRange, aspect='auto', origin="lower", norm=norm,
                       extent=[0, exposure.getWidth()-1, 0, exposure.getHeight()-1])
        ax.set_title('Spatial poly')
        plt.colorbar(im, orientation='horizontal', ticks=[vmin, vmax])

        ax = next(subplots)
        ax.set_autoscale_on(False)
        ax.set_xbound(lower=0, upper=exposure.getWidth())
        ax.set_ybound(lower=yMin, upper=yMax)
        ax.plot(xGood, dfGood, 'b+')
        if numBad > 0:
            ax.plot(xBad, dfBad, 'r+')
        ax.axhline(0.0)
        ax.set_title('K%d Residuals(x)' % k)

        ax = next(subplots)

        photoCalib = exposure.getPhotoCalib()
        # Fall back to a unit calibration if the mean calibration is not
        # positive (i.e. the exposure carries no usable photometric solution).
        if photoCalib.getCalibrationMean() <= 0:
            photoCalib = afwImage.PhotoCalib(1.0)

        ampMag = [photoCalib.instFluxToMagnitude(candAmp) for candAmp in candAmps]
        ax.plot(ampMag, zGood[:, k], 'b+')
        if numBad > 0:
            badAmpMag = [photoCalib.instFluxToMagnitude(badAmp) for badAmp in badAmps]
            ax.plot(badAmpMag, zBad[:, k], 'r+')

        ax.set_title('Flux variation')

    fig.show()

    global keptPlots
    if keepPlots and not keptPlots:
        # Keep plots open when done
        def show():
            print("%s: Please close plots when done." % __name__)
            try:
                plt.show()
            except Exception:
                pass
            print("Plots closed, exiting...")
        import atexit
        atexit.register(show)
        keptPlots = True
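The snippet assumes a makeSubplots generator that yields one Axes per panel.
This is not the LSST implementation (which also honors the gutter keywords);
it is a minimal stand-in that yields the same nrow x ncol blocks of subplots
arranged in an Nx by Ny grid, in the consumption order used above:

def makeSubplots(fig, nrow, ncol, Nx=1, Ny=1, **gutters):
    """Yield axes for Nx*Ny panels of nrow*ncol subplots each.

    Gutter keyword arguments are accepted but ignored in this sketch.
    """
    nCols = Nx * ncol  # columns in the global subplot grid
    for p in range(Nx * Ny):          # one panel per kernel component
        px, py = p % Nx, p // Nx      # panel position in the panel grid
        for s in range(nrow * ncol):  # the subplots within that panel
            sx, sy = s % ncol, s // ncol
            row = py * nrow + sy
            col = px * ncol + sx
            yield fig.add_subplot(Ny * nrow, nCols, row * nCols + col + 1)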