def runQuantum(
    self,
    butlerQC: pipeBase.ButlerQuantumContext,
    inputRefs: pipeBase.InputQuantizedConnection,
    outputRefs: pipeBase.OutputQuantizedConnection,
):
    inputs = {}
    for name, refs in inputRefs:
        inputs[name] = butlerQC.get(refs)
    # Record the lengths of each input catalog
    lengths = []
    # Remove the input catalogs from the list of inputs to the run method
    inputCatalogs = inputs.pop("inputCatalogs")
    # Add in all the input catalogs into the output catalog
    cumulativeLength = 0
    for inCatHandle in inputCatalogs:
        inCat = inCatHandle.get()
        lengths.append(len(inCat) + cumulativeLength)
        cumulativeLength += len(inCat)
        self.outputCatalog.extend(inCat, mapper=self.mapper)
    # Add the catalog lengths to the inputs to the run method
    inputs["lengths"] = lengths
    output = self.run(**inputs)
    butlerQC.put(output, outputRefs.outputCatalog)
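For context, a minimal sketch of a run method compatible with the runQuantum above. Only the lengths argument is taken from the snippet; the remaining keyword arguments and the direct catalog return are assumptions.

def run(self, lengths, **inputs):
    """Process the merged catalog built up by runQuantum.

    Parameters
    ----------
    lengths : `list` of `int`
        Cumulative row offsets recording where each input catalog
        ends inside ``self.outputCatalog``.
    **inputs
        Any remaining connection values forwarded by ``runQuantum``
        (hypothetical; depends on the task's connections).
    """
    # Task-specific processing of self.outputCatalog would go here,
    # using `lengths` to address the rows of each original catalog.
    # Returned directly because the runQuantum above puts the return
    # value into a single DatasetRef rather than unpacking a Struct.
    return self.outputCatalog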
def runQuantum(self, task, quantum, taskDef, butler):
    """Execute task on a single quantum.

    Parameters
    ----------
    task : `~lsst.pipe.base.PipelineTask`
        Task object.
    quantum : `~lsst.daf.butler.Quantum`
        Single Quantum instance.
    taskDef : `~lsst.pipe.base.TaskDef`
        Task definition structure.
    butler : `~lsst.daf.butler.Butler`
        Data butler.
    """
    # Create a butler that operates in the context of a quantum
    butlerQC = ButlerQuantumContext(butler, quantum)

    # Get the input and output references for the task
    inputRefs, outputRefs = taskDef.connections.buildDatasetRefs(quantum)

    # Call the task's runQuantum() method. Any exception thrown by the task
    # propagates to the caller.
    task.runQuantum(butlerQC, inputRefs, outputRefs)

    if taskDef.metadataDatasetName is not None:
        # DatasetRef has to be in the Quantum outputs; it can be looked up
        # by name
        try:
            ref = quantum.outputs[taskDef.metadataDatasetName]
        except LookupError as exc:
            raise LookupError(
                f"Quantum outputs is missing metadata dataset type"
                f" {taskDef.metadataDatasetName}; this can happen due to"
                " inconsistent options between Quantum generation and execution"
            ) from exc
        butlerQC.put(task.getFullMetadata(), ref[0])
def runQuantum(self, butlerQC: pipeBase.ButlerQuantumContext,
               inputRefs: pipeBase.InputQuantizedConnection,
               outputRefs: pipeBase.OutputQuantizedConnection):
    inputs = butlerQC.get(inputRefs)
    # Add an extra argument to pass to run; this is not part of the
    # base-class runQuantum
    inputs['msg'] = str(butlerQC.quantum.dataId)
    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)

# The task can then be run with pipetask, e.g.:
#
#   PYTHONPATH=$PYTHONPATH:$(pwd) pipetask run -j 5 \
#       -b "$CI_HSC_GEN3_DIR"/DATA/butler.yaml \
#       -i shared/ci_hsc_output -o aptest2 --register-dataset-types \
#       -t WritingPipelineTasks.ApertureTask \
#       -d "detector in (22, 16, 23) AND visit in (90334, 903986, 903988)"
def runQuantum(self, butlerQC: pipeBase.ButlerQuantumContext,
               inputRefs: pipeBase.InputQuantizedConnection,
               outputRefs: pipeBase.OutputQuantizedConnection):
    inputs = butlerQC.get(inputRefs)
    # Pack the visit+detector data ID into a unique integer exposure ID,
    # then build the IdFactory used to assign source IDs
    expId, expBits = butlerQC.quantum.dataId.pack("visit_detector",
                                                  returnMaxBits=True)
    idFactory = self.makeIdFactory(expId=expId, expBits=expBits)
    outputs = self.run(inputs['science'],
                       inputs['template'],
                       inputs['difference'],
                       inputs['selectSources'],
                       idFactory=idFactory)
    butlerQC.put(outputs, outputRefs)
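The makeIdFactory helper is not shown in this snippet. A common pattern in LSST tasks is to build an lsst.afw.table.IdFactory that embeds the packed exposure ID in the high-order bits of the 64-bit source ID; a minimal sketch under that assumption:

import lsst.afw.table as afwTable


def makeIdFactory(self, expId, expBits):
    # The packed exposure ID occupies the high `expBits` bits; the
    # remaining (64 - expBits) low bits are left for sequential source
    # numbers within this quantum.
    return afwTable.IdFactory.makeSource(expId, 64 - expBits)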
def runTestQuantum(task, butler, quantum, mockRun=True):
    """Run a PipelineTask on a Quantum.

    Parameters
    ----------
    task : `lsst.pipe.base.PipelineTask`
        The task to run on the quantum.
    butler : `lsst.daf.butler.Butler`
        The collection to run on.
    quantum : `lsst.daf.butler.Quantum`
        The quantum to run.
    mockRun : `bool`
        Whether or not to replace ``task``'s ``run`` method. The default of
        `True` is recommended unless ``run`` needs to do real work (e.g.,
        because the test needs real output datasets).

    Returns
    -------
    run : `unittest.mock.Mock` or `None`
        If ``mockRun`` is set, the mock that replaced ``run``. This object
        can be queried for the arguments ``runQuantum`` passed to ``run``.
    """
    _resolveTestQuantumInputs(butler, quantum)
    butlerQc = ButlerQuantumContext(butler, quantum)
    connections = task.config.ConnectionsClass(config=task.config)
    inputRefs, outputRefs = connections.buildDatasetRefs(quantum)
    if mockRun:
        with unittest.mock.patch.object(task, "run") as mock, \
                unittest.mock.patch("lsst.pipe.base.ButlerQuantumContext.put"):
            task.runQuantum(butlerQc, inputRefs, outputRefs)
            return mock
    else:
        task.runQuantum(butlerQc, inputRefs, outputRefs)
        return None
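A hypothetical test method using runTestQuantum; the task, butler, and quantum are assumed to come from the surrounding test fixtures, and the "lengths" key is just an illustrative connection name:

def testRunQuantum(self):
    # self.task, self.butler, and self.quantum are fixture attributes
    # (hypothetical); mockRun defaults to True, so run() is replaced.
    mockRun = runTestQuantum(self.task, self.butler, self.quantum)
    # runQuantum should have called run() exactly once; inspect the
    # keyword arguments it forwarded.
    mockRun.assert_called_once()
    _, kwargs = mockRun.call_args
    self.assertIn("lengths", kwargs)  # connection name is illustrative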
def runQuantum(
    self,
    butlerQC: pipeBase.ButlerQuantumContext,
    inputRefs: pipeBase.InputQuantizedConnection,
    outputRefs: pipeBase.OutputQuantizedConnection,
):
    """
    We need to be able to take pairs of detectors from the full set of
    detector exposures and run the task. Then we need to put the outputs
    back into the butler repository with the appropriate butler dataIds.

    For the `outputZernikesRaw` and `outputZernikesAvg` we only have one
    set of values per pair of wavefront detectors, so we put this in the
    dataId associated with the extra-focal detector.
    """
    exposures = butlerQC.get(inputRefs.exposures)
    focusZVals = [exp.getMetadata()["FOCUSZ"] for exp in exposures]
    extraIdx, intraIdx = self.assignExtraIntraIdx(focusZVals[0], focusZVals[1])

    donutCats = butlerQC.get(inputRefs.donutCatalog)
    camera = butlerQC.get(inputRefs.camera)

    outputs = self.run(exposures, donutCats, camera)

    butlerQC.put(outputs.donutStampsExtra, outputRefs.donutStampsExtra[extraIdx])
    butlerQC.put(outputs.donutStampsIntra, outputRefs.donutStampsIntra[extraIdx])
def run_quantum(task, butler, quantum):
    """Run a PipelineTask on a Quantum.

    Parameters
    ----------
    task : `lsst.pipe.base.PipelineTask`
        The task to run on the quantum.
    butler : `lsst.daf.butler.Butler`
        The collection to run on.
    quantum : `lsst.daf.butler.Quantum`
        The quantum to run.
    """
    butler_qc = ButlerQuantumContext(butler, quantum)
    connections = task.config.ConnectionsClass(config=task.config)
    input_refs, output_refs = connections.buildDatasetRefs(quantum)
    task.runQuantum(butler_qc, input_refs, output_refs)
def runQuantum(
    self,
    butlerQC: pipeBase.ButlerQuantumContext,
    inputRefs: pipeBase.InputQuantizedConnection,
    outputRefs: pipeBase.OutputQuantizedConnection,
):
    """
    We need to be able to take pairs of detectors from the full set of
    detector exposures and run the task. Then we need to put the outputs
    back into the butler repository with the appropriate butler dataIds.

    For the `outputZernikesRaw` and `outputZernikesAvg` we only have one
    set of values per pair of wavefront detectors, so we put this in the
    dataId associated with the extra-focal detector.
    """
    camera = butlerQC.get(inputRefs.camera)

    # Get the detector IDs for the wavefront sensors so that we can
    # appropriately match up pairs of detectors
    if camera.getName() == "LSSTCam":
        detectorMap = (
            obs_lsst.translators.lsstCam.LsstCamTranslator.detector_mapping()
        )
    else:
        raise ValueError(f"{camera.getName()} is not a valid camera name.")

    extraFocalIds = [detectorMap[detName][0] for detName in self.extraFocalNames]
    intraFocalIds = [detectorMap[detName][0] for detName in self.intraFocalNames]

    detectorIdArr = np.array(
        [exp.dataId["detector"] for exp in inputRefs.exposures]
    )
    donutCatIdArr = np.array(
        [dCat.dataId["detector"] for dCat in inputRefs.donutCatalog]
    )

    # Find cwfs detectors in the list of detectors being processed
    runExtraIds = sorted(set(detectorIdArr).intersection(extraFocalIds))
    runIntraIds = sorted(set(detectorIdArr).intersection(intraFocalIds))
    if len(runExtraIds) != len(runIntraIds):
        raise ValueError("Unequal number of intra and extra focal detectors.")

    for extraId, intraId in zip(runExtraIds, runIntraIds):
        if abs(extraId - intraId) != 1:
            raise ValueError("Intra and extra focal detectors not adjacent.")
        extraListIdx = np.where(detectorIdArr == extraId)[0][0]
        intraListIdx = np.where(detectorIdArr == intraId)[0][0]
        dCatExtraIdx = np.where(donutCatIdArr == extraId)[0][0]
        dCatIntraIdx = np.where(donutCatIdArr == intraId)[0][0]
        expInputs = butlerQC.get(
            [inputRefs.exposures[extraListIdx], inputRefs.exposures[intraListIdx]]
        )
        dCatInputs = butlerQC.get(
            [
                inputRefs.donutCatalog[dCatExtraIdx],
                inputRefs.donutCatalog[dCatIntraIdx],
            ]
        )
        outputs = self.run(expInputs, dCatInputs, camera)

        butlerQC.put(
            outputs.donutStampsExtra, outputRefs.donutStampsExtra[extraListIdx]
        )
        # Assign both outputs to the same dataId so that we can run Zernike
        # estimation fully in parallel through the dataIds of the
        # extra-focal detectors using CalcZernikesTask.
        butlerQC.put(
            outputs.donutStampsIntra, outputRefs.donutStampsIntra[extraListIdx]
        )