Example #1
    def _link(self, clipboard):
        """
        Link one or more sourcePaths (from policy) to destPaths after
        formatting each with additionalData derived from the clipboard and
        stage information.
        """
        if not self.policy.exists('Links'):
            mylog = Log(Log.defaultLog(),
                        "pex.harness.SymLinkStage.SymLinkStageSerial")
            mylog.log(Log.WARN, "No Links found")
            return

        additionalData = lsst.pex.harness.Utils.createAdditionalData(
            self, self.policy, clipboard)

        linkPolicyList = self.policy.getPolicyArray('Links')
        for linkPolicy in linkPolicyList:
            sourcePath = lsst.daf.persistence.LogicalLocation(
                linkPolicy.getString('sourcePath'),
                additionalData).locString()
            destPath = lsst.daf.persistence.LogicalLocation(
                linkPolicy.getString('destPath'), additionalData).locString()
            lsst.pex.logging.Trace("pex.harness.SymLinkStage.SymLinkStageSerial", 3, \
                    "linking %s to %s" % (sourcePath, destPath))
            parentDir = os.path.dirname(destPath)
            if parentDir and not os.path.exists(parentDir):
                os.makedirs(parentDir)
            try:
                os.symlink(sourcePath, destPath)
            except OSError, e:
                # ignore "file exists" but re-raise anything else
                if e.errno != 17:
                    raise e
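# Illustration (not part of the original stage code): the error handling above keeps an
# existing link and re-raises anything else. A minimal stdlib-only sketch of the same
# pattern, using errno.EEXIST instead of the hard-coded 17; the helper name is hypothetical.
import errno
import os

def ensure_symlink(sourcePath, destPath):
    """Create destPath -> sourcePath, creating parent dirs and ignoring an existing link."""
    parentDir = os.path.dirname(destPath)
    if parentDir and not os.path.exists(parentDir):
        os.makedirs(parentDir)
    try:
        os.symlink(sourcePath, destPath)
    except OSError as e:
        # "file exists" is expected on re-runs; anything else is a real error
        if e.errno != errno.EEXIST:
            raise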
    def shutdown(self):
        """
        Shutdown the Slice execution
        """
        shutlog = Log(self.log, "shutdown", Log.INFO)
        shutlog.log(Log.INFO, "Shutting down Slice")
        self.cppSlice.shutdown()
class FakeOutput(harnessStage.ParallelProcessing):
    """
    this stage simulates work by sleeping
    """

    def setup(self):
        if not self.log:
            self.log = Log.getDefaultLog()
        self.mylog = Log(self.log, "output")
        self.outputDatasetsKey = \
                    self.policy.getString("inputKeys.outputDatasets")
        self.possibleDatasetsKey = \
                    self.policy.getString("inputKeys.possibleDatasets")

    def process(self, clipboard):
        expected = clipboard.get(self.possibleDatasetsKey)
        outputds = clipboard.get(self.outputDatasetsKey)

        # this implementation will pretend to write out all of the
        # expected datasets.  It will also put each dataset written
        # out into the outputDatasets list.
        if expected:
            for ds in expected:
                self.mylog.log(Log.INFO, "Writing out " + ds.toString())
                outputds.append(ds)
        else:
            self.log.log(Log.WARN, "No expected datasets on clipboard")
            

        clipboard.put(self.outputDatasetsKey, outputds)
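# Illustration (an assumption, not harness code): FakeOutput only needs get()/put() from the
# clipboard, so a dict-backed stand-in is enough to exercise its logic outside the harness.
# MiniClipboard and the key names below are hypothetical.
class MiniClipboard(object):
    def __init__(self):
        self._data = {}
    def get(self, key):
        return self._data.get(key)
    def put(self, key, value):
        self._data[key] = value

cb = MiniClipboard()
cb.put("possibleDatasets", ["ds1", "ds2"])
cb.put("outputDatasets", [])
# a stage's process(clipboard) would then read "possibleDatasets", append each dataset
# to the "outputDatasets" list, and put the list back under the same key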
Example #4
    def checkExitByVisit(self):
        log = Log(self.log, "checkExitByVisit")

        if ((self._stop.isSet()) and (self.exitLevel == 4)):
            log.log(Log.INFO, "Pipeline stop is set at exitLevel of 4")
            log.log(Log.INFO, "Exit here at the end of the Visit")
            self.forceShutdown = 1
Example #5
    def shutdown(self):
        """
        Shutdown the Slice execution
        """
        shutlog = Log(self.log, "shutdown", Log.INFO)
        shutlog.log(Log.INFO, "Shutting down Slice")
        self.cppSlice.shutdown()
class IsrDarkStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrDarkStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrDarkStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing dark subtraction.")
        
        #grab exposure and dark from clipboard
        darkexposure = clipboard.get(self.policy.getString("inputKeys.darkexposure"))
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        darkscaling = darkexposure.getCalib().getExptime()
        expscaling = exposure.getCalib().getExptime()
        ipIsr.darkCorrection(exposure, darkexposure, float(expscaling),
                float(darkscaling))

        #output products
        clipboard.put(self.policy.get("outputKeys.darkSubtractedExposure"), exposure)
class IsrVarianceStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrVarianceStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrVarianceStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Calculating variance from image counts.")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        ipIsr.updateVariance(exposure)
        #output products
        clipboard.put(self.policy.get("outputKeys.varianceAddedExposure"), exposure)
Example #8
class IsrCcdSdqaStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "CcdSdqaStage -- Parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "ip_pipeline", "IsrCcdSdqaStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO,
                     "Calculate SDQA metrics based on the assembled ccd.")

        #grab exposure from clipboard
        exposure = clipboard.get(
            self.policy.getString("inputKeys.ccdExposure"))
        ipIsr.calculateSdqaCcdRatings(exposure)
        #output products
        clipboard.put(self.policy.get("outputKeys.sdqaCcdExposure"), exposure)
    def checkExitBySyncPoint(self): 
        log = Log(self.log, "checkExitBySyncPoint")

        if((self._stop.isSet()) and (self.exitLevel == 2)):
            log.log(Log.INFO, "Pipeline stop is set at exitLevel of 2")
            log.log(Log.INFO, "Exit here at a Synchronization point")
            self.forceShutdown = 1
    def checkExitByStage(self): 
        log = Log(self.log, "checkExitByStage")

        if((self._stop.isSet()) and (self.exitLevel == 3)):
            log.log(Log.INFO, "Pipeline stop is set at exitLevel of 3")
            log.log(Log.INFO, "Exit here at the end of the Stage")
            self.forceShutdown = 1
Example #11
    def checkExitByStage(self):
        log = Log(self.log, "checkExitByStage")

        if ((self._stop.isSet()) and (self.exitLevel == 3)):
            log.log(Log.INFO, "Pipeline stop is set at exitLevel of 3")
            log.log(Log.INFO, "Exit here at the end of the Stage")
            self.forceShutdown = 1
Example #12
    def checkExitBySyncPoint(self):
        log = Log(self.log, "checkExitBySyncPoint")

        if ((self._stop.isSet()) and (self.exitLevel == 2)):
            log.log(Log.INFO, "Pipeline stop is set at exitLevel of 2")
            log.log(Log.INFO, "Exit here at a Synchronization point")
            self.forceShutdown = 1
class IsrCcdSdqaStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "CcdSdqaStage -- Parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline",
                "IsrCcdSdqaStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Calculate SDQA metrics based on the assembled ccd.")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.ccdExposure"))
        ipIsr.calculateSdqaCcdRatings(exposure)
        #output products
        clipboard.put(self.policy.get("outputKeys.sdqaCcdExposure"),
                exposure)
    def checkExitByVisit(self): 
        log = Log(self.log, "checkExitByVisit")

        if((self._stop.isSet()) and (self.exitLevel == 4)):
            log.log(Log.INFO, "Pipeline stop is set at exitLevel of 4")
            log.log(Log.INFO, "Exit here at the end of the Visit")
            self.forceShutdown = 1
class IsrBiasStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrBiasStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrBiasStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing bias subtraction.")
        
        #grab exposure and bias from clipboard
        biasexposure = clipboard.get(self.policy.getString("inputKeys.biasexposure"))
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        ipIsr.biasCorrection(exposure, biasexposure)
        #output products
        clipboard.put(self.policy.get("outputKeys.biasSubtractedExposure"), exposure)
Example #16
class IsrFringeStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrFringeStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrFringeStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing Fringe correction.")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        fringeexposure = clipboard.get(self.policy.getString("inputKeys.fringeexposure"))
        #fringeCorrection is not implemented
        self.log.log(Log.INFO, "ipIsr.fringeCorrection is not implemented -- \n\tOriginal exposure is returned")
        #ipIsr.fringeCorrection(exposure, fringeexposure)

        #output products
        clipboard.put(self.policy.get("outputKeys.fringeCorrectedExposure"), exposure)
Example #17
    class _StopThread(threading.Thread):

        def __init__(self, joboffice, stopTopic, runId, brokerHost, 
                     brokerPort=None, waittime=60):

            threading.Thread.__init__(self, name=joboffice.getName()+".stop")
            self.setDaemon(True)
            
            self.jo = joboffice
            self.timeout = waittime

            self.log = Log(self.jo.log, "stop")

            selector = ""
            if runId:  selector = "RUNID='%s'" % runId
                
            if brokerPort:
                self.rcvr = EventReceiver(brokerHost, brokerPort, stopTopic,
                                          selector)
            else:
                self.rcvr = EventReceiver(brokerHost, stopTopic, selector)
                
        def run(self):
            while True:
                event = self.rcvr.receiveEvent(self.timeout)
                if event:
                    self.log.log(Log.INFO-1, "received stop event; " +
                                 "shutting down JobOffice thread")
                    self.jo.stop()
                if self.jo.halt:
                    return
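# Illustration (a stdlib-only sketch of the polling pattern above, not the JobOffice code):
# run() repeatedly blocks with a timeout and re-checks a halt flag, so the daemon thread
# exits promptly once its owner shuts down. The class and attribute names are hypothetical,
# and threading.Event stands in for the EventReceiver used above.
import threading

class PollingStopThread(threading.Thread):
    def __init__(self, waittime=60):
        threading.Thread.__init__(self, name="example.stop")
        self.setDaemon(True)
        self.timeout = waittime
        self.halt = False
        self._stopRequested = threading.Event()

    def requestStop(self):
        self._stopRequested.set()

    def run(self):
        while True:
            # stand-in for rcvr.receiveEvent(self.timeout): wait up to timeout seconds
            if self._stopRequested.wait(self.timeout):
                self.halt = True        # stand-in for self.jo.stop()
            if self.halt:
                return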
Example #18
class FakeOutput(harnessStage.ParallelProcessing):
    """
    this stage simulates work by sleeping
    """
    def setup(self):
        if not self.log:
            self.log = Log.getDefaultLog()
        self.mylog = Log(self.log, "output")
        self.outputDatasetsKey = \
                    self.policy.getString("inputKeys.outputDatasets")
        self.possibleDatasetsKey = \
                    self.policy.getString("inputKeys.possibleDatasets")

    def process(self, clipboard):
        expected = clipboard.get(self.possibleDatasetsKey)
        outputds = clipboard.get(self.outputDatasetsKey)

        # this implementation will pretend to write out all of the
        # expected datasets.  It will also put each dataset written
        # out into the outputDatasets list.
        if expected:
            for ds in expected:
                self.mylog.log(Log.INFO, "Writing out " + ds.toString())
                outputds.append(ds)
        else:
            self.log.log(Log.WARN, "No expected datasets on clipboard")

        clipboard.put(self.outputDatasetsKey, outputds)
Example #19
class IsrVarianceStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrVarianceStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "ip_pipeline", "IsrVarianceStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Calculating variance from image counts.")

        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        ipIsr.updateVariance(exposure)
        #output products
        clipboard.put(self.policy.get("outputKeys.varianceAddedExposure"),
                      exposure)
Example #20
    def shutdown(self):
        """
        Shutdown the Slice execution
        """
        shutlog = Log(self.log, "shutdown", Log.INFO)
        pid = os.getpid()
        shutlog.log(Log.INFO, "Shutting down Slice:  pid " + str(pid))
        os.kill(pid, signal.SIGKILL)
Example #21
    def shutdown(self):
        """
        Shutdown the Slice execution
        """
        shutlog = Log(self.log, "shutdown", Log.INFO)
        pid = os.getpid()
        shutlog.log(Log.INFO, "Shutting down Slice:  pid " + str(pid))
        os.kill(pid, signal.SIGKILL)
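# Illustration (an alternative sketch, not how the harness shuts down): SIGKILL cannot be
# caught, so nothing runs after the os.kill above. If a gentler teardown were wanted, a
# SIGTERM handler could do cleanup before exiting; everything below is an assumption.
import signal
import sys

def _handleTerm(signum, frame):
    # place for cleanup (flushing logs, closing queues) before the process exits
    sys.exit(0)

signal.signal(signal.SIGTERM, _handleTerm)
# a shutdown() could then send: os.kill(os.getpid(), signal.SIGTERM)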
Example #22
class DiffImStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps image subtraction        

    Policy Dictionary:
    lsst/ip/pipeline/policy/DiffImStageDictionary.paf

    Clipboard Input:
    - Template Exposure : to be convolved
    - Science Exposure  : to be matched to

    Clipboard Output:
    - Difference Exposure : resulting difference image
    - Psf Matching Kernel : the spatial model of the Psf matching Kernel
    - Background Function : differential background model
    """
    def setup(self):
        self.log = Log(self.log, "DiffImStage - parallel")
        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline",
                                                 "DiffImStageDictionary.paf",
                                                 "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile,
            policyFile.getRepositoryPath(),  # repos
            True)  # validate

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())
        self.diffImPolicy = ipDiffim.makeDefaultPolicy()

    def process(self, clipboard):
        """
        Run image subtraction
        """
        self.log.log(Log.INFO, "Running image subtraction")

        # grab exposures from clipboard
        templateExposure = clipboard.get(
            self.policy.getString("inputKeys.templateExposureKey"))
        scienceExposure = clipboard.get(
            self.policy.getString("inputKeys.scienceExposureKey"))

        # run image subtraction
        psfMatch = ipDiffim.ImagePsfMatch(self.diffImPolicy)
        results = psfMatch.subtractExposures(templateExposure, scienceExposure)

        # parse results
        differenceExposure, spatialKernel, backgroundModel, kernelCellSet = results

        #output products
        clipboard.put(self.policy.get("outputKeys.differenceExposureKey"),
                      differenceExposure)
        clipboard.put(self.policy.get("outputKeys.psfMatchingKernelKey"),
                      spatialKernel)
        clipboard.put(self.policy.get("outputKeys.backgroundFunctionKey"),
                      backgroundModel)
class SourceMeasurementStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps the measurement of sources on an exposure.
        The exposures to measure on should be in the clipboard along with the
        FootprintSet(s) to measure on those exposures. The keys for the
        exposures, and the FootprintSet(s) can be specified in the 
        policy file. If not specified, default keys will be used
    """
    def setup(self):
        self.log = Log(self.log, "SourceMeasurementStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("meas_pipeline", 
            "SourceMeasurementStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = defPolicy
        else:
            self.policy.mergeDefaults(defPolicy.getDictionary())
        
    def process(self, clipboard):
        """
        Measure sources in the worker process
        """
        self.log.log(Log.INFO, "Measuring Sources in process")
        
        #this may raise exceptions
        try:
            measurePolicy, exposure, psf, positiveDetection, negativeDetection = \
                           self.getClipboardData(clipboard)
        except pexExcept.LsstException, e:
            self.log.log(Log.FATAL, str(e))
         
        #
        # Need to do something smart about merging positive and negative
        # detection sets.             
        #
        # For now, assume they are disjoint sets, so merge is trivial
        #
        footprintLists = []
        if positiveDetection:
            self.log.log(Log.DEBUG, "Positive FootprintSet found")
            isNegative = False
            footprintLists.append([positiveDetection.getFootprints(), isNegative])

        if negativeDetection:
            self.log.log(Log.DEBUG, "Negative FootprintSet found")
            isNegative = True
            footprintLists.append([negativeDetection.getFootprints(), isNegative])

        sourceSet = srcMeas.sourceMeasurement(exposure, psf, footprintLists, measurePolicy)
        
        # place SourceSet on the clipboard
        sourceKey = self.policy.get("outputKeys.sources")
        clipboard.put(sourceKey, sourceSet)
        clipboard.put(sourceKey + "_persistable", afwDet.PersistableSourceVector(sourceSet))
Example #24
    def preprocess(self, clipboard):
        """
        Processing code for this Stage to be executed by the main Pipeline 
        prior to invoking Slice process 
        """
        log = Log(self.log,
                  "lsst.pexhexamples.pipeline.SampleStageSerial.preprocess")

        log.log(Log.INFO, 'Executing SampleStageSerial preprocess')
Example #25
    def __init__(self, fullPath=None):
        if fullPath is None:
            # use get(): a plain os.environ[...] lookup would raise KeyError, never return None
            pDir = os.environ.get("CAT_DIR")
            if pDir is None:
                raise RuntimeError('CAT_DIR env var required')
            fullPath = os.path.join(pDir, 'policy/defaultProdCatPolicy.paf')
        self.policyObj = pexPolicy.Policy.createPolicy(fullPath)
        log = Log(Log.getDefaultLog(), "cat")
        log.log(Log.DEBUG, 'Reading policy from %s' % fullPath)
Example #26
class CrRejectStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps estimating and possibly subtracting cosmic rays from an exposure
        on the clipboard.        

    Policy Dictionary:
    lsst/ip/pipeline/policy/CrRejectStageDictionary.paf

    Clipboard Input:
    - Calibrated science Exposure(s) (without background)
    - a PSF may be specified by policy attribute inputPsfKey. Alternatively, the
      stage's policy may request that a psf be constructed, by providing the
      psfPolicy attribute.

    ClipboardOutput:
    - Exposure with CRs removed. Key specified
        by policy attribute 'crSubtractedExposureKey'
    - nCR The number of CRs detected
    - PSF: the psf used to smooth the exposure before detection 
        Key specified by policy attribute 'psfKey'
    """
    def setup(self):
        self.log = Log(self.log, "CrRejectStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "CrRejectStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

        self.crRejectPolicy = self.policy.get("crRejectPolicy")

    def process(self, clipboard):
        """
        Detect CRs in the worker process
        """
        self.log.log(Log.INFO, "Detecting CRs in process")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))

        defaultFwhm = self.policy.get('parameters.defaultFwhm') # in arcsec
        keepCRs = self.policy.get('parameters.keepCRs')

        crs = ipUtils.cosmicRays.findCosmicRays(exposure, self.crRejectPolicy, defaultFwhm, keepCRs)
        nCR = len(crs)

        #output products
        clipboard.put("nCR", nCR)
        clipboard.put(self.policy.get("outputKeys.exposure"), exposure)
class BackgroundEstimationStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps estimating and possibly subtracting the background from an exposure
        on the clipboard.        

    Policy Dictionary:
    lsst/meas/pipeline/policy/BackgroundEstimationStageDictionary.paf

    Clipboard Input:
    - Calibrated science Exposure(s) (including background)

    ClipboardOutput:
    - background subtracted Exposure used in the detection. Key specified
        by policy attribute 'backgroundSubtractedExposure'
    - the measured background object itself. Key specified by policy 
        attribute 'background'        
    """
    def setup(self):
        self.log = Log(self.log, "BackgroundEstimationStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "meas_pipeline", "BackgroundEstimationStageDictionary.paf",
            "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

    def process(self, clipboard):
        """
        Detect sources in the worker process
        """
        self.log.log(Log.INFO, "Subtracting background in process")

        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))

        #estimate and maybe subtract the background
        background, backgroundSubtractedExposure = sourceDetection.estimateBackground(
            exposure, self.policy.get("parameters.backgroundPolicy"),
            self.policy.get("parameters.subtractBackground"))

        #output products
        clipboard.put(self.policy.get("outputKeys.background"), background)
        if backgroundSubtractedExposure:
            clipboard.put(
                self.policy.get("outputKeys.backgroundSubtractedExposure"),
                backgroundSubtractedExposure)
Example #28
class DiffImStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps image subtraction        

    Policy Dictionary:
    lsst/ip/pipeline/policy/DiffImStageDictionary.paf

    Clipboard Input:
    - Template Exposure : to be convolved
    - Science Exposure  : to be matched to

    Clipboard Output:
    - Difference Exposure : resulting difference image
    - Psf Matching Kernel : the spatial model of the Psf matching Kernel
    - Background Function : differential background model
    """
    def setup(self):
        self.log   = Log(self.log, "DiffImStage - parallel")
        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline",
                                                 "DiffImStageDictionary.paf", "policy")
        defPolicy  = pexPolicy.Policy.createPolicy(policyFile,
                                                   policyFile.getRepositoryPath(), # repos
                                                   True)                           # validate

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())
        self.diffImPolicy = ipDiffim.makeDefaultPolicy()

    def process(self, clipboard):
        """
        Run image subtraction
        """
        self.log.log(Log.INFO, "Running image subtraction")
        
        # grab exposures from clipboard
        templateExposure = clipboard.get(self.policy.getString("inputKeys.templateExposureKey"))
        scienceExposure  = clipboard.get(self.policy.getString("inputKeys.scienceExposureKey"))

        # run image subtraction
        psfMatch = ipDiffim.ImagePsfMatch(self.diffImPolicy)
        results = psfMatch.subtractExposures(templateExposure, scienceExposure)
        
        # parse results
        differenceExposure, spatialKernel, backgroundModel, kernelCellSet = results

        #output products
        clipboard.put(self.policy.get("outputKeys.differenceExposureKey"), differenceExposure)
        clipboard.put(self.policy.get("outputKeys.psfMatchingKernelKey"), spatialKernel)
        clipboard.put(self.policy.get("outputKeys.backgroundFunctionKey"), backgroundModel)
Example #29
    def populateClipboard(self, inputParamPropertySetPtr, iStage, eventTopic):
        """
        Place the event payload onto the Clipboard
        """
        log = Log(self.log, "populateClipboard")
        log.log(Log.DEBUG, 'Python Pipeline populateClipboard')

        queue = self.queueList[iStage - 1]
        clipboard = queue.element()

        # Slice does not disassemble the payload of the event.
        # It knows nothing of the contents.
        # It simply places the payload on the clipboard with key of the eventTopic
        clipboard.put(eventTopic, inputParamPropertySetPtr)
Example #30
    def populateClipboard(self, inputParamPropertySetPtr, iStage, eventTopic):
        """
        Place the event payload onto the Clipboard
        """
        log = Log(self.log, "populateClipboard");
        log.log(Log.DEBUG,'Python Pipeline populateClipboard');

        queue = self.queueList[iStage-1]
        clipboard = queue.element()

        # Slice does not disassemble the payload of the event. 
        # It knows nothing of the contents. 
        # It simply places the payload on the clipboard with key of the eventTopic
        clipboard.put(eventTopic, inputParamPropertySetPtr)
class PsfDeterminationStageParallel(harnessStage.ParallelProcessing):
    """
    Given an exposure and a set of sources measured on that exposure,
    determine a PSF for that exposure.

    This stage works on lists of (exposure, sourceSet) pairs.

    Their location on the clipboard is specified via policy.
    see lsst/meas/pipeline/pipeline/PsfDeterminationStageDictionary.paf
    for details on configuring valid stage policies
    """
    def setup(self):
        self.log = Log(self.log, "PsfDeterminationStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "meas_pipeline", "PsfDeterminationStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

        starSelectorName = self.policy.get("starSelectorName")
        starSelectorPolicy = self.policy.getPolicy("starSelectorPolicy")
        self.starSelector = measAlg.makeStarSelector(starSelectorName,
                                                     starSelectorPolicy)

        psfDeterminerName = self.policy.get("psfDeterminerName")
        psfDeterminerPolicy = self.policy.getPolicy("psfDeterminerPolicy")
        self.psfDeterminer = measAlg.makePsfDeterminer(psfDeterminerName,
                                                       psfDeterminerPolicy)

    def process(self, clipboard):
        self.log.log(Log.INFO, "Estimating PSF is in process")

        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))
        sourceSet = clipboard.get(self.policy.get("inputKeys.sourceSet"))

        psfCandidateList = self.starSelector.selectStars(exposure, sourceSet)
        metadata = dafBase.PropertySet()
        psf, psfCellSet = self.psfDeterminer.determinePsf(
            exposure, psfCandidateList, metadata)
        self.log.log(Log.INFO, "Calling exposure.setPsf(psf) in stage code")
        exposure.setPsf(psf)

        clipboard.put(self.policy.get("outputKeys.psf"), psf)
        clipboard.put(self.policy.get("outputKeys.cellSet"), psfCellSet)
        clipboard.put(self.policy.get("outputKeys.metadata"), metadata)
class BackgroundEstimationStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps estimating and possibly subtracting the background from an exposure
        on the clipboard.        

    Policy Dictionary:
    lsst/meas/pipeline/policy/BackgroundEstimationStageDictionary.paf

    Clipboard Input:
    - Calibrated science Exposure(s) (including background)

    ClipboardOutput:
    - background subtracted Exposure used in the detection. Key specified
        by policy attribute 'backgroundSubtractedExposure'
    - the measured background object itself. Key specified by policy 
        attribute 'background'        
    """
    def setup(self):
        self.log = Log(self.log, "BackgroundEstimationStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("meas_pipeline", 
                                                 "BackgroundEstimationStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

    def process(self, clipboard):
        """
        Detect sources in the worker process
        """
        self.log.log(Log.INFO, "Subtracting background in process")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))
            
        #estimate and maybe subtract the background
        background, backgroundSubtractedExposure = sourceDetection.estimateBackground(
            exposure,
            self.policy.get("parameters.backgroundPolicy"),
            self.policy.get("parameters.subtractBackground"))

        #output products
        clipboard.put(self.policy.get("outputKeys.background"), background)
        if backgroundSubtractedExposure:
            clipboard.put(self.policy.get("outputKeys.backgroundSubtractedExposure"),
                          backgroundSubtractedExposure)
Example #33
    def postprocess(self, clipboard):
        """
        Processing code for this Stage to be executed by the main Pipeline 
        after the completion of Slice process 
        """

        log = Log(self.log,
                  "lsst.pexhexamples.pipeline.SampleStageSerial.postprocess")
        log.log(Log.INFO, 'Executing SampleStageSerial postprocess')

        lr = LogRec(log, Log.INFO)
        lr << " rank " + str(self.rank)
        lr << " stageId " + str(self.stageId)
        lr << " universeSize " + str(self.universeSize)
        lr << " RunMode from Policy " + self.runmode
        lr << LogRec.endr
class PsfDeterminationStageParallel(harnessStage.ParallelProcessing):
    """
    Given an exposure and a set of sources measured on that exposure,
    determine a PSF for that exposure.

    This stage works on lists of (exposure, sourceSet) pairs.

    Their location on the clipboard is specified via policy.
    see lsst/meas/pipeline/pipeline/PsfDeterminationStageDictionary.paf
    for details on configuring valid stage policies
    """
    def setup(self):
        self.log = Log(self.log, "PsfDeterminationStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("meas_pipeline", 
                                                 "PsfDeterminationStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)
        
        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

        starSelectorName = self.policy.get("starSelectorName")
        starSelectorPolicy = self.policy.getPolicy("starSelectorPolicy")
        self.starSelector = measAlg.makeStarSelector(starSelectorName, starSelectorPolicy)

        psfDeterminerName = self.policy.get("psfDeterminerName")
        psfDeterminerPolicy = self.policy.getPolicy("psfDeterminerPolicy")
        self.psfDeterminer = measAlg.makePsfDeterminer(psfDeterminerName, psfDeterminerPolicy)

        
    def process(self, clipboard):
        self.log.log(Log.INFO, "Estimating PSF is in process")

        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))       
        sourceSet = clipboard.get(self.policy.get("inputKeys.sourceSet"))

        psfCandidateList = self.starSelector.selectStars(exposure, sourceSet)
        metadata = dafBase.PropertySet()
        psf, psfCellSet = self.psfDeterminer.determinePsf(exposure, psfCandidateList, metadata)
        self.log.log(Log.INFO, "Calling exposure.setPsf(psf) in stage code")
        exposure.setPsf(psf)
        
        clipboard.put(self.policy.get("outputKeys.psf"), psf)
        clipboard.put(self.policy.get("outputKeys.cellSet"), psfCellSet)
        clipboard.put(self.policy.get("outputKeys.metadata"), metadata)
Example #35
class ProvenanceRecorder(object):
    """
    an abstract interface for recording production-level policy data as
    provenance into a particular database.  A provenance consumer
    instance (usually a DatabaseConfigurator, from ctrl.orca) will
    instantiate a subclass that is wired for that particular
    provenance store.
    """

    def __init__(self, logger=None, fromSub=False):
        """
        As this class is abstract, it should only be executed from a
        subclass's constructor, in which case fromSub should be set to
        True.
        @param logger    a logger to use for messages.  This will be
                            passed to each recorder.  If null, a
                            logger will be created as needed.
        @param fromSub   set this to True to indicate that it is being called
                            from a subclass constructor.  If False (default),
                            an exception will be raised under the assumption
                            that one is trying to instantiate it directly.
        """
        # subclasses may wish to use a different logger name
        if not logger:
            logger = Log.getDefaultLog()
        self._logger = Log(logger, "provenance")

        if not fromSub:
            raise RuntimeError("Attempt to instantiate abstract class, " +
                               "ProvenanceRecorder; see class docs")

    def recordEnv(self):
        """
        Record the software and/or hardware environment.
        """
        self._logger.log(Log.DEBUG,
                         "no implementation for recording environment")

    def record(self, filename):
        """
        send the contents of the given file to the provenance store.
        """
        msg = 'called "abstract" Provenance.record'
        if self._logger:
            self._logger.log(Log.FATAL, msg)
        raise RuntimeError(msg)
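# Illustration (a hypothetical subclass, not part of the source): ProvenanceRecorder is
# abstract, so a concrete recorder passes fromSub=True to the base constructor and
# overrides record() (and optionally recordEnv()). FileProvenanceRecorder is an invented
# name, and the Log import assumes the usual lsst.pex.logging module used above.
from lsst.pex.logging import Log

class FileProvenanceRecorder(ProvenanceRecorder):
    def __init__(self, outDir, logger=None):
        ProvenanceRecorder.__init__(self, logger, fromSub=True)
        self.outDir = outDir

    def record(self, filename):
        # a real recorder would push the policy file into its provenance store;
        # this sketch only logs which file would have been recorded
        self._logger.log(Log.DEBUG, "would record provenance from %s" % filename)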
Example #37
class SimpleDiffImStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        Subtract two almost-identical Exposures

    Policy Dictionary:
        lsst/ip/pipeline/policy/simpleDiffImStageDictionary.paf

    Clipboard Input:
    - Two calibrated science Exposures

    ClipboardOutput:
    - Difference Exposure
    """
    def setup(self):
        self.log = Log(self.log, "simpleDiffImStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "SimpleDiffImStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

    def process(self, clipboard):
        """
        Subtract two almost-identical Exposures
        """
        self.log.log(Log.INFO, "Differencing two Exposures in process")
        
        #grab exposure from clipboard
        exposures = []
        for k in self.policy.getArray("inputKeys.exposures"):
            exposures.append(clipboard.get(k))

        mi0 = exposures[0].getMaskedImage()
        diff = mi0.Factory(mi0, True)
        diff -= exposures[1].getMaskedImage()

        differenceExposure = afwImage.makeExposure(diff, exposures[0].getWcs())
        differenceExposure.setMetadata(exposures[0].getMetadata())
        differenceExposure.getMaskedImage().setXY0(exposures[0].getXY0())

        #output products
        clipboard.put(self.policy.get("outputKeys.differenceExposure"), differenceExposure)
Example #38
class FakeInput(harnessStage.ParallelProcessing):
    """
    this stage logs the dataset we're supposed to be reading in
    """
    def setup(self):
        if not self.log:
            self.log = Log.getDefaultLog()
        self.mylog = Log(self.log, "inputStage")
        self.inputDatasetKey = \
                    self.policy.getString("inputKeys.inputDatasets")

    def process(self, clipboard):
        inputs = clipboard.get(self.inputDatasetKey)
        if inputs:
            for ds in inputs:
                self.mylog.log(Log.INFO, "Loading " + ds.toString())
        else:
            self.mylog.log(Log.WARN, "No input datasets given")
class MakePersistableSourceVectorStageParallel(harnessStage.ParallelProcessing):
    def setup(self):
        self.log = Log(self.log, "MakePersistableSourceVectorStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("meas_pipeline", 
            "MakePersistableSourceVectorStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = defPolicy
        else:
            self.policy.mergeDefaults(defPolicy.getDictionary())

    def process(self, clipboard):
        self.log.log(Log.INFO, "Making a persistable source vector in process")

        sourceSet = clipboard.get(self.policy.get("inputkeys.sourceSet"))
        clipboard.put(self.policy.get("outputKeys.persistable"), afwDet.PersistableSourceVector(sourceSet))
Example #40
class ApertureCorrectionStageParallel(harnessStage.ParallelProcessing):
    """
    Given an exposure and a set of sources measured on that exposure,
    determine the aperture correction for that exposure.

    This stage works on lists of (exposure, sourceSet) pairs.

    Their location on the clipboard is specified via policy.
    see lsst/meas/pipeline/pipeline/ApertureCorrectionStageDictionary.paf
    for details on configuring valid stage policies
    """
    def setup(self):
        self.log = Log(self.log, "ApertureCorrectionStage - parallel")

        # aperture correction policy
        apCorrPolicyFile = pexPolicy.DefaultPolicyFile(
            "meas_pipeline", "ApertureCorrectionStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            apCorrPolicyFile, apCorrPolicyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

        self.ApCorrPolicy = self.policy.get(
            "parameters.ApertureCorrectionPolicy")

    def process(self, clipboard):
        self.log.log(Log.INFO, "Estimating Aperture Correction is in process")

        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))
        cellSet = clipboard.get(self.policy.get("inputKeys.cellSet"))

        metadata = dafBase.PropertySet()
        apCorrCtrl = measAlg.ApertureCorrectionControl(self.ApCorrPolicy)
        apCorr = measAlg.ApertureCorrection(exposure,
                                            cellSet,
                                            metadata,
                                            apCorrCtrl,
                                            log=self.log)

        clipboard.put(self.policy.get("outputKeys.apCorr"), apCorr)
        clipboard.put(self.policy.get("outputKeys.metadata"), metadata)
Example #41
    def threadBarrier(self, iStage):
        """
        Create an approximate barrier where all Slices intercommunicate with the Pipeline 
        """

        log = Log(self.log, "threadBarrier")

        self.checkExitBySyncPoint()

        # if((self._stop.isSet()) and (self.exitLevel == 2)):

        #     log.log(Log.INFO, "Pipeline stop is set at exitLevel of 2; exit here at a synchronization point")
        #     print "Pipeline stop is set at exitLevel of 2; exit here at a synchronization point"
        # os._exit()
        #    sys.exit()
        #    log.log(Log.INFO, "Pipeline Ever reach here ?? ")

        entryTime = time.time()
        log.log(Log.DEBUG, "Entry time %f" % (entryTime))

        for i in range(self.nSlices):
            k = 2 * i
            loopEventA = self.loopEventList[k]
            loopEventB = self.loopEventList[k + 1]

            signalTime1 = time.time()
            log.log(Log.DEBUG, "Signal to Slice  %d %f" % (i, signalTime1))

            loopEventA.set()

            log.log(Log.DEBUG, "Wait for signal from Slice %d" % (i))

            # Wait for the B event to be set by the Slice
            # Execute time.sleep in between checks to free the GIL periodically
            useDelay = self.barrierDelay

            if (iStage == 1):
                useDelay = 0.1
            if (iStage == 290):
                useDelay = 0.1

            while (not (loopEventB.isSet())):
                time.sleep(useDelay)

            signalTime2 = time.time()
            log.log(
                Log.DEBUG,
                "Done waiting for signal from Slice %d %f" % (i, signalTime2))

            if (loopEventB.isSet()):
                loopEventB.clear()

        self.checkExitBySyncPoint()
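# Illustration (an assumed Slice-side counterpart, not code from the source): the Pipeline
# loop above sets event A for each Slice and then polls event B, so a worker on the other
# side would plausibly wait on A, run its stage work, and set B. The events are
# threading.Event objects from self.loopEventList; names here are hypothetical.
def sliceBarrierStep(loopEventA, loopEventB, doStageWork):
    loopEventA.wait()      # block until the Pipeline signals this Slice
    loopEventA.clear()     # re-arm the event for the next barrier
    doStageWork()          # the Slice's per-stage processing
    loopEventB.set()       # tell the Pipeline this Slice has finished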
Example #42
class IsrOverscanStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrOverscanStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "ip_pipeline", "IsrOverscanStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing overscan subtraction.")

        #grab exposure and overscan bbox from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        fittype = self.policy.getString("parameters.overscanFitType")
        amp = cameraGeom.cast_Amp(exposure.getDetector())
        overscanBbox = amp.getDiskBiasSec()
        dataBbox = amp.getDiskDataSec()
        #It just so happens that this is an o.k. place to put the SDQA
        #calculation because the ratings requested at the moment can all be
        #calculated here.  If, for example, an Amp rating on the flat-fielded
        #amp were requested, it would have to be calculated separately.
        ipIsr.calculateSdqaAmpRatings(exposure, overscanBbox, dataBbox)
        ipIsr.overscanCorrection(exposure, overscanBbox, fittype)
        #TODO optionally trim
        #output products
        clipboard.put(self.policy.get("outputKeys.overscanCorrectedExposure"),
                      exposure)
Example #43
class FakeInput(harnessStage.ParallelProcessing):
    """
    this stage logs the dataset we're supposed to be reading in
    """

    def setup(self):
        if not self.log:
            self.log = Log.getDefaultLog()
        self.mylog = Log(self.log, "inputStage")
        self.inputDatasetKey = \
                    self.policy.getString("inputKeys.inputDatasets")

    def process(self, clipboard):
        inputs = clipboard.get(self.inputDatasetKey)
        if inputs:
            for ds in inputs:
                self.mylog.log(Log.INFO, "Loading " + ds.toString())
        else:
            self.mylog.log(Log.WARN, "No input datasets given")
class SourceMeasurementPsfFluxStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps the measurement of sources on an exposure.
        The exposures to measure on should be in the clipboard along with the
        FootprintSet(s) to measure on those exposures. The keys for the
        exposures, and the FootprintSet(s) can be specified in the 
        policy file. If not specified, default keys will be used
    """
    def setup(self):
        self.log = Log(self.log, "SourceMeasurementPsfFluxStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "meas_pipeline", "SourceMeasurementPsfFluxStageDictionary.paf",
            "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = defPolicy
        else:
            self.policy.mergeDefaults(defPolicy.getDictionary())

    def process(self, clipboard):
        """
        Measure sources in the worker process
        """
        self.log.log(Log.INFO, "Measuring Sources' psfFluxes in process")

        #this may raise exceptions
        try:
            exposure, sourceSet = self.getClipboardData(clipboard)
        except pexExcept.LsstException, e:
            self.log.log(Log.FATAL, str(e))

        srcMeas.sourceMeasurementPsfFlux(exposure, sourceSet)

        # place SourceSet on the clipboard.  We need to do this because of the _persistable version
        # even though there's already a sourceSet. Damn.
        sourceKey = self.policy.get("outputKeys.sourceSet")
        clipboard.put(sourceKey, sourceSet)
        clipboard.put(sourceKey + "_persistable",
                      afwDet.PersistableSourceVector(sourceSet))
Example #45
    def threadBarrier(self, iStage): 
        """
        Create an approximate barrier where all Slices intercommunicate with the Pipeline 
        """

        log = Log(self.log, "threadBarrier")

        self.checkExitBySyncPoint()

        # if((self._stop.isSet()) and (self.exitLevel == 2)):

        #     log.log(Log.INFO, "Pipeline stop is set at exitLevel of 2; exit here at a synchronization point")
        #     print "Pipeline stop is set at exitLevel of 2; exit here at a synchronization point" 
            # os._exit() 
        #    sys.exit()
        #    log.log(Log.INFO, "Pipeline Ever reach here ?? ")

        entryTime = time.time()
        log.log(Log.DEBUG, "Entry time %f" % (entryTime)) 
        

        for i in range(self.nSlices):
            k = 2*i
            loopEventA = self.loopEventList[k]
            loopEventB = self.loopEventList[k+1]

            signalTime1 = time.time()
            log.log(Log.DEBUG, "Signal to Slice  %d %f" % (i, signalTime1)) 

            loopEventA.set()

            log.log(Log.DEBUG, "Wait for signal from Slice %d" % (i)) 

            # Wait for the B event to be set by the Slice
            # Execute time.sleep in between checks to free the GIL periodically
            useDelay = self.barrierDelay

            if(iStage == 1): 
                useDelay = 0.1
            if(iStage == 290): 
                useDelay = 0.1

            while( not (loopEventB.isSet())):
                 time.sleep(useDelay)

            signalTime2 = time.time()
            log.log(Log.DEBUG, "Done waiting for signal from Slice %d %f" % (i, signalTime2)) 

            if(loopEventB.isSet()):
                loopEventB.clear()

        self.checkExitBySyncPoint()
class MultifitFlagIngestStageParallel(harnessStage.ParallelProcessing):
    """
    PT1.2 Hack to ingest status flags from meas_multifit into source
    """
    def setup(self):
        self.log = Log(self.log, "MultifitFlagIngestStage - parallel")

        
        # default policy
        policyFile = pexPolicy.DefaultPolicyFile(\
                "meas_pipeline", 
                "MultifitFlagIngestStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile,
                                                  policyFile.getRepositoryPath(), True)
        
        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

    def process(self, clipboard):
        self.log.log(Log.INFO, "MultifitFlagIngestStage is starting")

        statusSchema = lsst.afw.detection.Schema(\
                "flag", self.policy.get("parameters.flagSchemaId"),\
                lsst.afw.detection.Schema.INT)

        #grab sourceSet and apertureCorrection from clipboard
        # correct psf flux in situ
        sourceSet = clipboard.get(self.policy.get("inputKeys.sourceSet"))
        algorithm = self.policy.get("parameters.algorithm")
        for s in sourceSet:
            if(s.getPhotometry()):
                try:
                    photom = s.getPhotometry().find(algorithm)                    
                except:
                    self.log.log(Log.WARN,\
                            "%s measurement not found in photometry for source %d"%\
                            (algorithm, s.getSourceId()))
                    continue

                status = int(photom.get(statusSchema))
                s.setFlagForAssociation(status)
class ApertureCorrectionStageParallel(harnessStage.ParallelProcessing):
    """
    Given an exposure and a set of sources measured on that exposure,
    determine the aperture correction for that exposure.

    This stage works on lists of (exposure, sourceSet) pairs.

    Their location on the clipboard is specified via policy.
    see lsst/meas/pipeline/pipeline/ApertureCorrectionStageDictionary.paf
    for details on configuring valid stage policies
    """
    def setup(self):
        self.log = Log(self.log, "ApertureCorrectionStage - parallel")

        # aperture correction policy
        apCorrPolicyFile = pexPolicy.DefaultPolicyFile("meas_pipeline", 
                                                       "ApertureCorrectionStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(apCorrPolicyFile,
                                                  apCorrPolicyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy.getDictionary())

        self.ApCorrPolicy = self.policy.get("parameters.ApertureCorrectionPolicy")

    def process(self, clipboard):
        self.log.log(Log.INFO, "Estimating Aperture Correction is in process")

        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.get("inputKeys.exposure"))       
        cellSet = clipboard.get(self.policy.get("inputKeys.cellSet"))
        
        metadata = dafBase.PropertySet()
        apCorrCtrl = measAlg.ApertureCorrectionControl(self.ApCorrPolicy)
        apCorr = measAlg.ApertureCorrection(exposure, cellSet, metadata,
                                                       apCorrCtrl, log=self.log)
        

        clipboard.put(self.policy.get("outputKeys.apCorr"), apCorr)
        clipboard.put(self.policy.get("outputKeys.metadata"), metadata)
Example #48
    def process(self, clipboard):
        """
        Processing code for this Stage to be executed by the Slices 
        """
        log = Log(
            self.log,
            "lsst.pexhexamples.pipeline.SampleFailingStageSerial.preprocess")

        log.log(Log.INFO, 'Executing SampleFailingStageParallel process')

        if (self.runmode == "process"):

            oneData = lsst.daf.base.PropertySet()
            oneData.setString("message", "Calculating variance")
            clipboard.put("onedata", oneData)

            # Raise a bogus error
            raise RuntimeError(
                "SampleFailingStageParallel: Divide by zero occurred in process"
            )
class IsrSaturationStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrSaturationStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "ip_pipeline", "IsrSaturationStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing Saturation correction.")

        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        metadata = exposure.getMetadata()
        exposure = ipIsr.convertImageForIsr(exposure)
        fwhm = self.policy.getDouble("parameters.defaultFwhm")
        amp = cameraGeom.cast_Amp(exposure.getDetector())
        saturation = amp.getElectronicParams().getSaturationLevel()
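        # saturationDetection presumably finds pixels at or above the
        # saturation level, masks them when doMask=True, and returns the
        # bounding boxes of the saturated regions (a description of the
        # intent, not of ipIsr's exact behaviour).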
        bboxes = ipIsr.saturationDetection(exposure,
                                           int(saturation),
                                           doMask=True)
        self.log.log(Log.INFO, "Found %i saturated regions." % (len(bboxes)))
        #output products
        clipboard.put(self.policy.get("outputKeys.saturationMaskedExposure"),
                      exposure)
class IsrCcdAssemblyStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "CcdAssembly -- Parallel")

        policyFile = pexPolicy.DefaultPolicyFile(
            "ip_pipeline", "IsrCcdAssemblyStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing CCD assembly.")

        #grab exposure from clipboard
        exposureList = clipboard.get(
            self.policy.getString("inputKeys.exposureList"))
        rmKeys = self.policy.getArray("parameters.deleteFieldsList")
        amp = cameraGeom.cast_Amp(exposureList[0].getDetector())
        ccdId = amp.getParent().getId()
        ccd = cameraGeom.cast_Ccd(amp.getParent())
        exposure = ipIsr.ccdAssemble.assembleCcd(exposureList,
                                                 ccd,
                                                 keysToRemove=rmKeys)
        #output products
        clipboard.put(self.policy.get("outputKeys.assembledCcdExposure"),
                      exposure)
class IsrOverscanStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrOverscanStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrOverscanStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing overscan subtraction.")
        
        #grab exposure and overscan bbox from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        fittype = self.policy.getString("parameters.overscanFitType")
        amp = cameraGeom.cast_Amp(exposure.getDetector())
        overscanBbox = amp.getDiskBiasSec()
        dataBbox = amp.getDiskDataSec()
        #It just so happens that this is an o.k. place to put the SDQA
        #calculation because the ratings requested at the moment can all be
        #calculated here.  If, for example, an Amp rating on the flat-fielded
        #amp were requested, it would have to be calculated separately.
        ipIsr.calculateSdqaAmpRatings(exposure, overscanBbox, dataBbox)
        ipIsr.overscanCorrection(exposure, overscanBbox, fittype)
        #TODO optionally trim
        #output products
        clipboard.put(self.policy.get("outputKeys.overscanCorrectedExposure"), exposure)
Example #52
class IsrDarkStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrDarkStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline",
                                                 "IsrDarkStageDictionary.paf",
                                                 "policy")
        defPolicy = pexPolicy.Policy.createPolicy(
            policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing dark subtraction.")

        #grab exposure and dark from clipboard
        darkexposure = clipboard.get(
            self.policy.getString("inputKeys.darkexposure"))
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        darkscaling = darkexposure.getCalib().getExptime()
        expscaling = exposure.getCalib().getExptime()
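        # darkCorrection presumably scales the dark frame by the ratio of
        # exposure times before subtracting, conceptually
        #     exposure -= dark * (expscaling / darkscaling)
        # (a sketch of the intent, not the ipIsr implementation itself).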
        ipIsr.darkCorrection(exposure, darkexposure, float(expscaling),
                             float(darkscaling))

        #output products
        clipboard.put(self.policy.get("outputKeys.darkSubtractedExposure"),
                      exposure)
class SourceMeasurementPsfFluxStageParallel(harnessStage.ParallelProcessing):
    """
    Description:
        This stage wraps the measurement of sources on an exposure.
        The exposures to measure should be on the clipboard along with the
        FootprintSet(s) to measure on those exposures.  The keys for the
        exposures and the FootprintSet(s) can be specified in the policy
        file; if not specified, default keys will be used.
    """
    def setup(self):
        self.log = Log(self.log, "SourceMeasurementPsfFluxStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("meas_pipeline", 
            "SourceMeasurementPsfFluxStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath(), True)

        if self.policy is None:
            self.policy = defPolicy
        else:
            self.policy.mergeDefaults(defPolicy.getDictionary())
        
    def process(self, clipboard):
        """
        Measure sources in the worker process
        """
        self.log.log(Log.INFO, "Measuring Sources' psfFluxes in process")
        
        #this may raise exceptions
        try:
            exposure, sourceSet = self.getClipboardData(clipboard)
        except pexExcept.LsstException, e:
            self.log.log(Log.FATAL, str(e))
            raise

        srcMeas.sourceMeasurementPsfFlux(exposure, sourceSet)
        
        # place the SourceSet on the clipboard, along with a persistable
        # version under the "_persistable" suffix
        sourceKey = self.policy.get("outputKeys.sourceSet")
        clipboard.put(sourceKey, sourceSet)
        clipboard.put(sourceKey + "_persistable", afwDet.PersistableSourceVector(sourceSet))
class IsrCcdAssemblyStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "CcdAssembly -- Parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline",
                "IsrCcdAssemblyStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing CCD assembly.")
        
        #grab exposure from clipboard
        exposureList = clipboard.get(self.policy.getString("inputKeys.exposureList"))
        rmKeys = self.policy.getArray("parameters.deleteFieldsList")
        amp = cameraGeom.cast_Amp(exposureList[0].getDetector())
        ccdId = amp.getParent().getId()
        ccd = cameraGeom.cast_Ccd(amp.getParent())
        exposure = ipIsr.ccdAssemble.assembleCcd(exposureList, ccd,
                keysToRemove=rmKeys)
        #output products
        clipboard.put(self.policy.get("outputKeys.assembledCcdExposure"),
                exposure)
class IsrFlatStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrFlatStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrFlatStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing Flat correction.")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        flatexposure = clipboard.get(self.policy.getString("inputKeys.flatexposure"))
        scalingtype = self.policy.getString("parameters.flatScalingType")
        if scalingtype == "USER":
            scalingvalue = self.policy.getDouble("parameters.flatScalingValue")
            ipIsr.flatCorrection(exposure, flatexposure, "USER", scalingvalue)
        else:
            ipIsr.flatCorrection(exposure, flatexposure, scalingtype)

        #output products
        clipboard.put(self.policy.get("outputKeys.flatCorrectedExposure"), exposure)
class IsrSaturationStageParallel(harnessStage.ParallelProcessing):
    """
    Description:

    Policy Dictionary:

    Clipboard Input:

    ClipboardOutput:
    """
    def setup(self):
        self.log = Log(self.log, "IsrSaturationStage - parallel")

        policyFile = pexPolicy.DefaultPolicyFile("ip_pipeline", "IsrSaturationStageDictionary.paf", "policy")
        defPolicy = pexPolicy.Policy.createPolicy(policyFile, policyFile.getRepositoryPath())

        if self.policy is None:
            self.policy = pexPolicy.Policy()
        self.policy.mergeDefaults(defPolicy)

    def process(self, clipboard):
        """
        """
        self.log.log(Log.INFO, "Doing Saturation correction.")
        
        #grab exposure from clipboard
        exposure = clipboard.get(self.policy.getString("inputKeys.exposure"))
        metadata = exposure.getMetadata()
        exposure = ipIsr.convertImageForIsr(exposure)
        fwhm = self.policy.getDouble("parameters.defaultFwhm")
        amp = cameraGeom.cast_Amp(exposure.getDetector())
        saturation = amp.getElectronicParams().getSaturationLevel()
        bboxes = ipIsr.saturationDetection(exposure, int(saturation), doMask = True)
        self.log.log(Log.INFO, "Found %i saturated regions."%(len(bboxes)))
        #output products
        clipboard.put(self.policy.get("outputKeys.saturationMaskedExposure"),
                exposure)
Example #57
class FakeProcessing(harnessStage.ParallelProcessing):
    """
    this stage simulates work by sleeping
    """

    def setup(self):
        if not self.log:
            self.log = Log.getDefaultLog()
        self.mylog = Log(self.log, "fakeProcess")
        self.jobIdentityItem = \
                    self.policy.getString("inputKeys.jobIdentity")
        self.sleeptime = self.policy.getInt("sleep")
        self.visitCount = 0
        self.failOnVisitN = self.policy.getInt("failIteration")
        

    def process(self, clipboard):
        jobIdentity = clipboard.get(self.jobIdentityItem)
        self.mylog.log(Log.INFO, "Processing %s %s..." % (jobIdentity["type"], str(jobIdentity)))
        time.sleep(self.sleeptime)

        self.visitCount += 1
        if self.visitCount == self.failOnVisitN:
            raise RuntimeError("testing failure stage")
Example #58
    def threadBarrier(self):
        """
        Create an approximate barrier where all Slices intercommunicate with the Pipeline 
        """

        log = Log(self.log, "threadBarrier")

        entryTime = time.time()
        log.log(Log.DEBUG, "Slice %d waiting for signal from Pipeline %f" % (self._rank, entryTime))

        self.loopEventA.wait()

        signalTime1 = time.time()
        log.log(Log.DEBUG, "Slice %d done waiting; signaling back %f" % (self._rank, signalTime1))

        if self.loopEventA.isSet():
            self.loopEventA.clear()

        self.loopEventB.set()

        signalTime2 = time.time()
        log.log(Log.DEBUG, "Slice %d sent signal back. Exit threadBarrier  %f" % (self._rank, signalTime2))