def workButler(self):
    """A Butler that can read and write to a Gen 3 repository
    (`lsst.daf.butler.Butler`, read-only).

    Notes
    -----
    Assumes `repo` has been initialized.
    """
    if self._workButler is None:
        try:
            # Hard-code the collection names because it's hard to infer
            # the inputs from the Butler
            queryButler = dafButler.Butler(self.repo, writeable=True)  # writeable for _workButler
            inputs = {"skymaps", "refcats"}
            for dimension in queryButler.registry.queryDataIds('instrument'):
                instrument = obsBase.Instrument.fromName(dimension["instrument"], queryButler.registry)
                inputs.add(instrument.makeDefaultRawIngestRunName())
                inputs.add(instrument.makeCalibrationCollectionName())
            inputs.update(queryButler.registry.queryCollections(re.compile(r"templates/\w+")))
            # should set run=self.runName, but this breaks quantum graph generation (DM-26246)
            self._workButler = dafButler.Butler(butler=queryButler, collections=inputs)
        except OSError as e:
            raise RuntimeError(f"{self.repo} is not a Gen 3 repository") from e
    return self._workButler
def setUp(self):
    self.config = EstimateZernikesCwfsTaskConfig()
    self.task = EstimateZernikesCwfsTask(config=self.config)

    self.butler = dafButler.Butler(self.repoDir)
    self.registry = self.butler.registry

    self.dataIdExtra = {
        "instrument": "LSSTCam",
        "detector": 191,
        "exposure": self.visitNum,
        "visit": self.visitNum,
    }
    self.dataIdIntra = {
        "instrument": "LSSTCam",
        "detector": 192,
        "exposure": self.visitNum,
        "visit": self.visitNum,
    }

    self.testRunName = "testTaskRun"
    self.collectionsList = list(self.registry.queryCollections())
    if self.testRunName in self.collectionsList:
        cleanUpCmd = writeCleanUpRepoCmd(self.repoDir, self.testRunName)
        runProgram(cleanUpCmd)
def makeDefaultLatissButler(location, *, extraCollections=None, writeable=False):
    """Create a butler for LATISS using the default collections.

    Parameters
    ----------
    location : `str`
        The location for which to create the default butler. Valid values
        are 'NCSA', 'NTS' and 'summit'.
    extraCollections : `list` of `str`
        Extra input collections to supply to the butler init.
    writeable : `bool`, optional
        Whether to make a writeable butler.

    Returns
    -------
    butler : `lsst.daf.butler.Butler`
        The butler.
    """
    # TODO: DM-33849 remove this once we can use the butler API.
    # TODO: Add logging of which collections are going in
    if location not in _LOCATIONS:
        raise RuntimeError(f'Default butler location only supported for {_LOCATIONS}, got {location}')
    repodir = LATISS_REPO_LOCATION_MAP[location]
    LSC = LATISS_SUPPLEMENTAL_COLLECTIONS  # grrr, line lengths
    collections = (LSC[location] if location in LSC else []) + LATISS_DEFAULT_COLLECTIONS
    if extraCollections:
        collections.extend(extraCollections)
    return dafButler.Butler(repodir, collections=collections, writeable=writeable, instrument='LATISS')
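# Hedged usage sketch for makeDefaultLatissButler, not taken from the source.
# Assumptions: the 'NCSA' entry in LATISS_REPO_LOCATION_MAP points at a repo
# reachable from the current host, and 'u/example/extraRun' is a purely
# hypothetical extra input collection used for illustration.
latissButler = makeDefaultLatissButler('NCSA', extraCollections=['u/example/extraRun'])
# The returned butler reads from the default LATISS collections plus the extras.
print(list(latissButler.collections))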
def _export_for_copy(dataset, repo):
    """Export a Gen 3 repository so that a dataset can make copies later.

    Parameters
    ----------
    dataset : `lsst.ap.verify.dataset.Dataset`
        The dataset needing the ability to copy the repository.
    repo : `str`
        The location of the Gen 3 repository.
    """
    butler = daf_butler.Butler(repo)
    with butler.export(directory=dataset.configLocation, format="yaml") as contents:
        # Need all detectors, even those without data, for visit definition
        contents.saveDataIds(butler.registry.queryDataIds({"detector"}).expanded())
        contents.saveDatasets(butler.registry.queryDatasets(datasetType=..., collections=...))
        # Explicitly save the calibration and chained collections.
        # Do _not_ include the RUN collections here because that will export
        # an empty raws collection, which ap_verify assumes does not exist
        # before ingest.
        targetTypes = {daf_butler.CollectionType.CALIBRATION, daf_butler.CollectionType.CHAINED}
        for collection in butler.registry.queryCollections(..., collectionTypes=targetTypes):
            contents.saveCollection(collection)
        # Export empty template collections
        contents.saveCollection("skymaps")
        contents.saveCollection("templates/deep")
def setUpClass(cls):
    """Generate donutCatalog needed for task."""
    moduleDir = getModulePath()
    cls.testDataDir = os.path.join(moduleDir, "tests", "testData")
    testPipelineConfigDir = os.path.join(cls.testDataDir, "pipelineConfigs")
    cls.repoDir = os.path.join(cls.testDataDir, "gen3TestRepo")
    cls.runName = "run1"

    # Check that run doesn't already exist due to previous improper cleanup
    butler = dafButler.Butler(cls.repoDir)
    registry = butler.registry
    collectionsList = list(registry.queryCollections())
    if cls.runName in collectionsList:
        cleanUpCmd = writeCleanUpRepoCmd(cls.repoDir, cls.runName)
        runProgram(cleanUpCmd)

    collections = "refcats/gen2,LSSTCam/calib,LSSTCam/raw/all"
    instrument = "lsst.obs.lsst.LsstCam"
    cls.cameraName = "LSSTCam"
    pipelineYaml = os.path.join(testPipelineConfigDir, "testBasePipeline.yaml")

    pipeCmd = writePipetaskCmd(
        cls.repoDir, cls.runName, instrument, collections, pipelineYaml=pipelineYaml
    )
    runProgram(pipeCmd)
def setUpClass(cls):
    cls.root = testUtils.makeTestTempDir(os.path.abspath(os.path.dirname(__file__)))
    cls.addClassCleanup(testUtils.removeTestTempDir, cls.root)

    # Can't use in-memory datastore because JobReporter creates a
    # new Butler from scratch.
    cls.repo = dafButler.Butler(dafButler.Butler.makeRepo(cls.root), writeable=True)

    # White-box testing: must use real metrics, and provide datasets of
    # type metricvalue_*_*.
    butlerTests.addDataIdValue(cls.repo, "instrument", "NotACam")
    butlerTests.addDataIdValue(cls.repo, "detector", 101)
    # physical_filter needed for well-behaved visits
    butlerTests.addDataIdValue(cls.repo, "physical_filter", "k2021", band="k")
    butlerTests.addDataIdValue(cls.repo, "visit", 42)
    # Dependency on verify_metrics, but not on the code for computing
    # these metrics.
    butlerTests.addDatasetType(
        cls.repo, "metricvalue_pipe_tasks_CharacterizeImageTime",
        {"instrument", "visit", "detector"}, "MetricValue")
def _export_for_copy(dataset, repo):
    """Export a Gen 3 repository so that a dataset can make copies later.

    Parameters
    ----------
    dataset : `lsst.ap.verify.dataset.Dataset`
        The dataset needing the ability to copy the repository.
    repo : `str`
        The location of the Gen 3 repository.
    """
    butler = daf_butler.Butler(repo)
    with butler.export(directory=dataset.configLocation, format="yaml") as contents:
        # Need all detectors, even those without data, for visit definition
        contents.saveDataIds(butler.registry.queryDataIds({"detector"}).expanded())
        contents.saveDatasets(butler.registry.queryDatasets(datasetType=..., collections=...))
        # Explicitly save the calibration and chained collections.
        # Do _not_ include the RUN collections here because that will export
        # an empty raws collection, which ap_verify assumes does not exist
        # before ingest.
        target_types = {daf_butler.CollectionType.CALIBRATION, daf_butler.CollectionType.CHAINED}
        for collection in butler.registry.queryCollections(..., collectionTypes=target_types):
            contents.saveCollection(collection)
        # Export skymap collection even if it is empty
        contents.saveCollection(lsst.skymap.BaseSkyMap.SKYMAP_RUN_COLLECTION_NAME)
        # Dataset export exports visits, but need matching visit definitions as
        # well (DefineVisitsTask won't add them back in).
        contents.saveDimensionData("exposure", butler.registry.queryDimensionRecords("exposure"))
        contents.saveDimensionData("visit_definition",
                                   butler.registry.queryDimensionRecords("visit_definition"))
        contents.saveDimensionData("visit_detector_region",
                                   butler.registry.queryDimensionRecords("visit_detector_region"))
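# Hedged sketch of the matching import step, not taken from the source.
# Assumptions: '/path/to/newRepo' is a placeholder, and the export written
# above landed in dataset.configLocation under the default filename
# 'export.yaml'.
new_butler = daf_butler.Butler(daf_butler.Butler.makeRepo("/path/to/newRepo"), writeable=True)
new_butler.import_(directory=dataset.configLocation, filename="export.yaml", transfer="auto")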
def update_butler2(event):
    global config2
    global butler2
    global registry2
    try:
        if repo2_select.value == 'None':
            config2 = None
            butler2 = None
            registry2 = None
            collection2_select.options = []
            debug_text.value = "butler2 set to None."
        else:
            config2 = repo2_select.value.joinpath("butler.yaml")
            butler2 = dafButler.Butler(config=str(config2))
            registry2 = butler2.registry
            collections2 = list(registry2.queryCollections())
            collection2_select.options = collections2
            collection2_select.value = collections2[0]
            debug_text.value = f"Successfully loaded butler2 from {config2}."
    except Exception:
        debug_text.value = f"Failed to load butler2 from {config2}"
        # collection2_select.value = ""
        raise
def setUp(self):
    boresightRa = 0.0
    boresightDec = 0.0
    boresightRotAng = 0.0
    self.refCatInterface = RefCatalogInterface(boresightRa, boresightDec, boresightRotAng)

    moduleDir = getModulePath()
    self.testDataDir = os.path.join(moduleDir, "tests", "testData")
    self.repoDir = os.path.join(self.testDataDir, "gen3TestRepo")
    self.butler = dafButler.Butler(self.repoDir)
    self.registry = self.butler.registry

    shardIds = self.refCatInterface.getHtmIds()
    self.catalogName = "cal_ref_cat"
    self.collections = ["refcats/gen2"]
    dataRefs, dataIds = self.refCatInterface.getDataRefs(
        shardIds, self.butler, self.catalogName, self.collections
    )
    self.dataRefs = dataRefs
    self.dataIds = dataIds

    self.config = GenerateDonutCatalogOnlineTaskConfig()
    self.camera = self.butler.get(
        "camera", instrument="LSSTCam", collections=["LSSTCam/calib/unbounded"]
    )
def _importRepository(cls, instrument, exportPath, exportFile):
    """Import a test repository into self.testDir

    Parameters
    ----------
    instrument : `str`
        Full string name for the instrument.
    exportPath : `str`
        Path to location of repository to export.
    exportFile : `str`
        Filename of export data.
    """
    cls.repo = os.path.join(cls.testDir, 'testrepo')

    # Make the repo and retrieve a writeable Butler
    _ = dafButler.Butler.makeRepo(cls.repo)
    butler = dafButler.Butler(cls.repo, writeable=True)

    # Register the instrument
    instrInstance = pipeBase.Instrument.from_string(instrument)
    instrInstance.register(butler.registry)

    # Import the exportFile
    butler.import_(directory=exportPath, filename=exportFile,
                   transfer='symlink',
                   skip_dimensions={'instrument', 'detector', 'physical_filter'})
def plotDiaSourcesInFocalPlane(repo, sourceTable, gridsize=(400, 400), title='',
                               gen='gen2', instrument='DECam', collections=[]):
    """Plot DIA Source locations in the focal plane.

    Parameters
    ----------
    repo : `str`
        Repository corresponding to the output of an ap_pipe run.
    sourceTable : `pandas.core.frame.DataFrame`
        Pandas dataframe with DIA Sources from an APDB.
    gridsize : `tuple` of form (int, int)
        Number of hexagons in the (x, y) directions for the hexbin plot.
    title : `str`, optional
        String to append to the plot title.
    gen : `str`, optional
        Either 'gen2' or 'gen3'.
    instrument : `str`, optional
        Default is 'DECam', used with gen3 butler only.
    collections : `list` or `str`, optional
        Must be provided for gen3 to load the camera properly.
    """
    if gen == 'gen2':
        butler = dafPersist.Butler(repo)
        camera = butler.get('camera')
    else:  # gen3
        butler = dafButler.Butler(repo)
        if instrument == 'DECam':
            camera = DarkEnergyCamera().getCamera()
        else:
            raise NotImplementedError

    corners = getCcdCorners(butler, sourceTable, gen, instrument, collections)
    xFP_list = []
    yFP_list = []
    for index, row in sourceTable.iterrows():
        xFP, yFP = ccd2focalPlane(row['x'], row['y'], row['ccd'], camera=camera)
        xFP_list.append(xFP)
        yFP_list.append(yFP)
    xFP_Series = pd.Series(xFP_list, index=sourceTable.index)
    yFP_Series = pd.Series(yFP_list, index=sourceTable.index)

    fig1 = plt.figure(figsize=(8, 8))
    ax1 = fig1.add_subplot(111, aspect='equal')
    for index, row in corners.iterrows():
        ax1.add_patch(patches.Rectangle((row[7], row[6]), -row.height, -row.width, fill=False))
        ax1.text(row[7] - row.height/2, row[6] - row.width/2, '%d' % (row[1]))
        plt.plot(row[7] - row.height/2, row[6] - row.width/2, ',')
    # somehow x and y are switched... geometry is hard
    ax1.hexbin(yFP_Series, xFP_Series, gridsize=gridsize, bins='log', cmap='Blues')
    ax1.set_title('DIA Sources in focal plane coordinates %s' % (title))
    ax1.set_xlabel('Focal Plane X', size=16)
    ax1.set_ylabel('Focal Plane Y', size=16)
    ax1.invert_yaxis()
    ax1.invert_xaxis()
def setUpClass(cls):
    """Setup butler, and generate an ISR processed exposure.

    Notes
    -----
    DMTN-101 4.1:

    Process an independent bias frame through the ISR including
    overscan correction and bias subtraction
    """
    repoDir = os.path.join("DATA/")
    butler = dafButler.Butler(repoDir,
                              collections=['LATISS/raw/all', 'LATISS/calib', 'calib/v00'])

    config = ipIsr.IsrTaskConfig()
    config.doSaturation = True
    config.doSuspect = True
    config.doSetBadRegions = True
    config.doOverscan = True
    config.doBias = True
    config.doVariance = True
    config.doLinearize = False
    config.doCrosstalk = False
    config.doWidenSaturationTrails = False
    config.doBrighterFatter = False
    config.doDefect = False
    config.doSaturationInterpolation = False
    config.doDark = False
    config.doStrayLight = False
    config.doFlat = False
    config.doApplyGains = False
    config.doFringe = False
    config.doMeasureBackground = False
    config.doVignette = False
    config.doAttachTransmissionCurve = False
    config.doUseOpticsTransmission = False
    config.doUseFilterTransmission = False
    config.doUseSensorTransmission = False
    config.doUseAtmosphereTransmission = False

    isrTask = ipIsr.IsrTask(config=config)

    rawDataId = {'detector': 0, 'exposure': 2020012800007, 'instrument': 'LATISS'}
    # TODO: DM-26396
    # This is not an independent frame.
    cls.raw = butler.get('raw', dataId=rawDataId)
    cls.bias = butler.get('bias', rawDataId)
    cls.camera = butler.get('camera', rawDataId)

    results = isrTask.run(cls.raw, camera=cls.camera, bias=cls.bias)
    cls.exposure = results.outputExposure
def plotDiaSourceDensityInFocalPlane(repo, sourceTable, cmap=mpl.cm.Blues, title='',
                                     gen='gen2', instrument='DECam', collections=[]):
    """Plot average density of DIA Sources in the focal plane (per CCD).

    Parameters
    ----------
    repo : `str`
        Repository corresponding to the output of an ap_pipe run.
    sourceTable : `pandas.core.frame.DataFrame`
        Pandas dataframe with DIA Sources from an APDB.
    cmap : `matplotlib.colors.ListedColormap`
        Matplotlib colormap.
    title : `str`, optional
        String to append to the plot title.
    gen : `str`, optional
        Either 'gen2' or 'gen3'.
    instrument : `str`, optional
        Default is 'DECam', used with gen3 butler only.
    collections : `list` or `str`, optional
        Must be provided for gen3 to load the camera properly.
    """
    ccdArea, visitArea = getCcdAndVisitSizeOnSky(repo, sourceTable, gen, instrument, collections)
    nVisits = len(np.unique(sourceTable['visit'].values))
    ccdGroup = sourceTable.groupby('ccd')
    # DIA Source count per visit per square degree, for each CCD
    ccdSourceCount = ccdGroup.visit.count().values/nVisits/ccdArea

    if gen == 'gen2':
        butler = dafPersist.Butler(repo)
    else:  # gen3
        butler = dafButler.Butler(repo)
    corners = getCcdCorners(butler, sourceTable, gen, instrument, collections)

    norm = mpl.colors.Normalize(vmin=np.min(ccdSourceCount), vmax=np.max(ccdSourceCount))
    fig1 = plt.figure(figsize=(8, 8))
    ax1 = fig1.add_subplot(111, aspect='equal')
    for index, row in corners.iterrows():
        try:
            averageFocalPlane = ccdGroup.get_group(int(row[1])).x.count()/nVisits/ccdArea
        except KeyError:
            # plot normalization will be weird but it won't fall over
            averageFocalPlane = 0
        ax1.add_patch(patches.Rectangle((row[7], row[6]), -row.height, -row.width,
                                        fill=True, color=cmap(norm(averageFocalPlane))))
        ax1.text(row[7] - row.height/2, row[6] - row.width/2, '%d' % (row[1]), fontsize=12)
        plt.plot(row[7] - row.height/2, row[6] - row.width/2, ',')
    ax1.set_title('Mean DIA Source density in focal plane coordinates %s' % (title))
    ax1.set_xlabel('Focal Plane X', size=16)
    ax1.set_ylabel('Focal Plane Y', size=16)
    ax1 = plt.gca()
    ax1.invert_yaxis()
    ax1.invert_xaxis()

    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    cb = plt.colorbar(sm, fraction=0.04, pad=0.04)
    cb.set_label('DIA Sources per sq. deg.', rotation=90)
def _testFgcmMultiFit(self, instName, testName, queryString, visits, zpOffsets):
    """Test running the full pipeline with multiple fit cycles.

    Parameters
    ----------
    instName : `str`
        Short name of the instrument.
    testName : `str`
        Base name of the test collection.
    queryString : `str`
        Query to send to the pipetask.
    visits : `list`
        List of visits to calibrate.
    zpOffsets : `np.ndarray`
        Zeropoint offsets expected.
    """
    instCamel = instName.title()

    configFiles = {
        'fgcmBuildStarsTable': [os.path.join(ROOT, 'config', f'fgcmBuildStarsTable{instCamel}.py')],
        'fgcmFitCycle': [os.path.join(ROOT, 'config', f'fgcmFitCycle{instCamel}.py')],
        'fgcmOutputProducts': [os.path.join(ROOT, 'config', f'fgcmOutputProducts{instCamel}.py')],
    }
    outputCollection = f'{instName}/{testName}/unified'

    cwd = os.getcwd()
    runDir = os.path.join(self.testDir, testName)
    os.makedirs(runDir)
    os.chdir(runDir)

    self._runPipeline(
        self.repo,
        os.path.join(ROOT, 'pipelines', f'fgcmFullPipeline{instCamel}.yaml'),
        configFiles=configFiles,
        inputCollections=[f'{instName}/{testName}/lut', 'refcats/gen2'],
        outputCollection=outputCollection,
        queryString=queryString,
        registerDatasetTypes=True)

    os.chdir(cwd)

    butler = dafButler.Butler(self.repo)
    offsetCat = butler.get('fgcmReferenceCalibrationOffsets',
                           collections=[outputCollection], instrument=instName)
    offsets = offsetCat['offset'][:]
    self.assertFloatsAlmostEqual(offsets, zpOffsets, atol=1e-6)
def tearDown(self):
    # Get Butler with updated registry
    self.butler = dafButler.Butler(self.repoDir)
    self.registry = self.butler.registry

    self.collectionsList = list(self.registry.queryCollections())
    if self.testRunName in self.collectionsList:
        cleanUpCmd = writeCleanUpRepoCmd(self.repoDir, self.testRunName)
        runProgram(cleanUpCmd)
def _testFgcmBuildStarsTable(self, instName, testName, queryString, visits, nStar, nObs):
    """Test running of FgcmBuildStarsTableTask.

    Parameters
    ----------
    instName : `str`
        Short name of the instrument.
    testName : `str`
        Base name of the test collection.
    queryString : `str`
        Query to send to the pipetask.
    visits : `list`
        List of visits to calibrate.
    nStar : `int`
        Number of stars expected.
    nObs : `int`
        Number of observations of stars expected.
    """
    instCamel = instName.title()

    configFiles = {
        'fgcmBuildStarsTable': [os.path.join(ROOT, 'config', f'fgcmBuildStarsTable{instCamel}.py')],
    }
    outputCollection = f'{instName}/{testName}/buildstars'

    self._runPipeline(
        self.repo,
        os.path.join(ROOT, 'pipelines', f'fgcmBuildStarsTable{instCamel}.yaml'),
        configFiles=configFiles,
        inputCollections=[f'{instName}/{testName}/lut', 'refcats/gen2'],
        outputCollection=outputCollection,
        queryString=queryString,
        registerDatasetTypes=True)

    butler = dafButler.Butler(self.repo)

    visitCat = butler.get('fgcmVisitCatalog', collections=[outputCollection], instrument=instName)
    self.assertEqual(len(visits), len(visitCat))

    starIds = butler.get('fgcmStarIds', collections=[outputCollection], instrument=instName)
    self.assertEqual(len(starIds), nStar)

    starObs = butler.get('fgcmStarObservations', collections=[outputCollection], instrument=instName)
    self.assertEqual(len(starObs), nObs)
def setUp(self):
    self.config = DonutSourceSelectorTaskConfig()
    self.task = DonutSourceSelectorTask()

    moduleDir = getModulePath()
    self.testDataDir = os.path.join(moduleDir, "tests", "testData")
    self.repoDir = os.path.join(self.testDataDir, "gen3TestRepo")
    self.butler = dafButler.Butler(self.repoDir)
    self.registry = self.butler.registry
def makeSrcTableFlags(sourceTable, objectTable,
                      badFlagList=['base_PixelFlags_flag_bad',
                                   'base_PixelFlags_flag_suspect',
                                   'base_PixelFlags_flag_saturatedCenter'],
                      gen='gen2', instrument='DECam', repo=None):
    """Apply flag filters to a DIA Source and a DIA Object table.

    Parameters
    ----------
    sourceTable : `pandas.core.frame.DataFrame`
        Pandas dataframe with DIA Sources from an APDB.
    objectTable : `pandas.core.frame.DataFrame`
        Pandas dataframe with DIA Objects from the same APDB.
    badFlagList : `list`
        Names of flags presumed to each indicate a DIA Source is garbage.
    gen : `str`, optional
        Either 'gen2' or 'gen3'.
    instrument : `str`, optional
        Default is 'DECam', used with gen3 butler only.
    repo : `str`, optional
        Repository in which to load a butler, used with gen3 only.

    Returns
    -------
    flagTable : `pandas.core.frame.DataFrame`
        Dataframe containing unpacked DIA Source flag values.
    sourceTableFlags : `pandas.core.frame.DataFrame`
        Dataframe resulting from merging sourceTable with flagTable.
    flagFilter : `pandas.core.series.Series` of `bool`
        Single column of booleans of length len(sourceTable).
        The value of flagFilter is True if one or more flags in badFlagList is True.
    goodSrc : `pandas.core.frame.DataFrame`
        Dataframe containing only DIA Sources from sourceTable with no bad flags.
    goodObj : `pandas.core.frame.DataFrame`
        Dataframe containing only DIA Objects from objectTable entirely
        composed of DIA Sources with no bad flags.
    """
    if gen == 'gen3':
        butler = dafButler.Butler(repo)
    else:
        butler = None
    sourceTable = addVisitCcdToSrcTable(sourceTable, instrument=instrument, gen=gen, butler=butler)

    config = TransformDiaSourceCatalogConfig()
    unpacker = UnpackApdbFlags(config.flagMap, 'DiaSource')
    flagValues = unpacker.unpack(sourceTable['flags'], 'flags')
    flagTable = pd.DataFrame(flagValues, index=sourceTable.index)
    sourceTableFlags = pd.merge(sourceTable, flagTable, left_index=True, right_index=True)

    badFlags = [sourceTableFlags[flag] for flag in badFlagList]
    flagFilter = functools.reduce(operator.or_, badFlags)
    noFlagFilter = ~flagFilter

    goodSrc = sourceTableFlags.loc[noFlagFilter]
    goodObjIds = set(sourceTableFlags.loc[noFlagFilter, 'diaObjectId'])
    goodObj = objectTable.loc[objectTable['diaObjectId'].isin(goodObjIds)]

    return flagTable, sourceTableFlags, flagFilter, goodSrc, goodObj
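# Hedged usage sketch for makeSrcTableFlags, not taken from the source.
# Assumptions: 'srcTable' and 'objTable' are DataFrames previously read from
# an APDB, and '/path/to/gen3repo' is a placeholder repository path.
flagTable, srcWithFlags, flagFilter, goodSrc, goodObj = makeSrcTableFlags(
    srcTable, objTable, gen='gen3', instrument='DECam', repo='/path/to/gen3repo')
# Fraction of DIA Sources rejected by the default bad-flag list
print(flagFilter.mean())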
def getCcdAndVisitSizeOnSky(repo, sourceTable, gen='gen2', instrument='DECam', collections=[],
                            visit=None, detector=None):
    """Estimate the area of one CCD and one visit on the sky, in square degrees.

    Parameters
    ----------
    repo : `str`
        Repository corresponding to the output of an ap_pipe run.
    sourceTable : `pandas.core.frame.DataFrame`
        Pandas dataframe with DIA Sources from an APDB.
    gen : `str`, optional
        Either 'gen2' or 'gen3'.
    instrument : `str`, optional
        Default is 'DECam', used with gen3 butler only.
    collections : `list` or `str`, optional
        Must be provided for gen3 to load the camera properly.
    visit : `int` or None, optional
        Specific visit to use when loading representative calexp.
    detector : `int` or None, optional
        Specific detector (ccd) to use when loading representative calexp.

    Returns
    -------
    ccdArea : `float`
        Area covered by one detector (CCD) on the sky, in square degrees.
    visitArea : `float`
        Area covered by a visit with all detectors (CCDs) on the sky,
        in square degrees.
    """
    visits = np.unique(sourceTable.visit)
    ccds = np.unique(sourceTable.ccd)
    nGoodCcds = len(ccds)
    if gen == 'gen2':
        butler = dafPersist.Butler(repo)
        if visit is None:
            visit = int(visits[0])
        if detector is None:
            ccd = int(ccds[0])
        else:
            ccd = int(detector)
        calexp = butler.get('calexp', visit=visit, ccd=ccd)
        bbox = butler.get('calexp_bbox', visit=visit, ccd=ccd)
    else:  # gen3
        butler = dafButler.Butler(repo)
        if visit is None:
            visit = int(visits[0])
        if detector is None:
            detector = int(ccds[0])
        calexp = butler.get('calexp', collections=collections, instrument=instrument,
                            visit=visit, detector=detector)
        bbox = butler.get('calexp.bbox', collections=collections, instrument=instrument,
                          visit=visit, detector=detector)
    pixelScale = calexp.getWcs().getPixelScale().asArcseconds()
    ccdArea = (pixelScale*pixelScale*bbox.getArea()*u.arcsec**2).to(u.deg**2).value
    visitArea = ccdArea * nGoodCcds
    return ccdArea, visitArea
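# Hedged usage sketch for getCcdAndVisitSizeOnSky, not taken from the source.
# Assumptions: the repo path and collection name are placeholders, and
# 'srcTable' is a DIA Source DataFrame with 'visit' and 'ccd' columns.
ccdArea, visitArea = getCcdAndVisitSizeOnSky(
    '/path/to/gen3repo', srcTable, gen='gen3', instrument='DECam',
    collections=['DECam/defaults'])
print(f"One CCD covers {ccdArea:.3f} sq. deg.; a full visit covers {visitArea:.2f} sq. deg.")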
def instrument(self):
    """The Gen 3 instrument associated with this data
    (`lsst.obs.base.Instrument`, read-only).
    """
    butler = dafButler.Butler(self._preloadedRepo, writeable=False)
    instruments = list(butler.registry.queryDataIds('instrument'))
    if len(instruments) != 1:
        raise RuntimeError(f"Dataset does not have exactly one instrument; got {instruments}.")
    else:
        return obsBase.Instrument.fromName(instruments[0]["instrument"], butler.registry)
def setUp(self):
    self.boresightRa = 0.03
    self.boresightDec = -0.02
    self.boresightRotAng = 90.0
    self.refCatInterface = RefCatalogInterface(
        self.boresightRa, self.boresightDec, self.boresightRotAng
    )

    moduleDir = getModulePath()
    self.testDataDir = os.path.join(moduleDir, "tests", "testData")
    self.repoDir = os.path.join(self.testDataDir, "gen3TestRepo")
    self.butler = dafButler.Butler(self.repoDir)
def _checkOutputGen3(self, repo):
    """Perform various integrity checks on a repository.

    Parameters
    ----------
    repo : `str`
        The repository to test. Currently only filesystem repositories
        are supported.
    """
    self.assertTrue(os.path.exists(repo), 'Output directory must exist.')
    # Call to Butler will fail if repo is corrupted
    butler = dafButler.Butler(repo)
    self.assertIn("LSSTCam-imSim/calib", butler.registry.queryCollections())
def _export_for_copy(dataset, repo):
    """Export a Gen 3 repository so that a dataset can make copies later.

    Parameters
    ----------
    dataset : `lsst.ap.verify.dataset.Dataset`
        The dataset needing the ability to copy the repository.
    repo : `str`
        The location of the Gen 3 repository.
    """
    butler = daf_butler.Butler(repo)
    with butler.export(directory=dataset.configLocation, format="yaml") as contents:
        contents.saveDatasets(butler.registry.queryDatasets(datasetType=..., collections=...,
                                                            expand=True))
def plotSeeingHistogram(repo, sourceTable, ccd=35, gen='gen2', instrument='DECam', collections=[]):
    """Plot distribution of visit seeing.

    Parameters
    ----------
    repo : `str`
        Repository corresponding to the output of an ap_pipe run.
    sourceTable : `pandas.core.frame.DataFrame`
        Pandas dataframe with DIA Sources from an APDB.
    ccd : `int`
        The ccd being considered, default 35.
    gen : `str`, optional
        Either 'gen2' or 'gen3'.
    instrument : `str`, optional
        Default is 'DECam', used with gen3 butler only.
    collections : `list` or `str`, optional
        Must be provided for gen3 to load the camera properly.
    """
    fwhm = pd.DataFrame()
    visits = np.unique(sourceTable['visit'])
    radii = []
    if gen == 'gen2':
        butler = dafPersist.Butler(repo)
        for visit in visits:
            calexp = butler.get('calexp_sub', visit=int(visit), ccd=ccd, bbox=lsst.geom.Box2I())
            psf = calexp.getPsf()
            psfSize = psf.computeShape().getDeterminantRadius()
            radii.append(psfSize*2.355)  # convert sigma to FWHM
    else:  # gen3
        butler = dafButler.Butler(repo)
        for visit in visits:
            psf = butler.get('calexp.psf', instrument=instrument, collections=collections,
                             visit=int(visit), detector=ccd)
            psfSize = psf.computeShape().getDeterminantRadius()
            radii.append(psfSize*2.355)  # convert sigma to FWHM
        # Get just one calexp for WCS purposes
        calexp = butler.get('calexp', collections=collections, instrument=instrument,
                            visit=int(visit), detector=ccd)
    fwhm['visit'] = pd.Series(visits)
    fwhm['radius'] = pd.Series(radii, index=fwhm.index)
    pixelScale = calexp.getWcs().getPixelScale().asArcseconds()  # same for all visits

    fig, ax = plt.subplots(figsize=(6, 4))
    plt.hist(fwhm['radius'].values, alpha=0.5)
    plt.xlabel('Seeing FWHM (pixels)')
    plt.ylabel('Visit count')
    secax = ax.secondary_xaxis('top', functions=(lambda x: x*pixelScale, lambda x: x/pixelScale))
    secax.set_xlabel('Seeing FWHM (arcseconds)')
def setUp(self):
    self.config = GenerateDonutCatalogWcsTaskConfig()
    self.config.donutSelector.fluxField = "g_flux"
    self.config.donutSelector.donutRadius = 0.0
    self.task = GenerateDonutCatalogWcsTask(config=self.config)

    moduleDir = getModulePath()
    self.testDataDir = os.path.join(moduleDir, "tests", "testData")
    self.repoDir = os.path.join(self.testDataDir, "gen3TestRepo")
    self.centerRaft = ["R22_S10", "R22_S11"]

    self.butler = dafButler.Butler(self.repoDir)
    self.registry = self.butler.registry
def analysisButler(self):
    """A Butler that can read from a Gen 3 repository with outputs
    (`lsst.daf.butler.Butler`, read-only).

    Notes
    -----
    Assumes `repo` has been initialized.
    """
    if self._analysisButler is None:
        try:
            self._analysisButler = dafButler.Butler(self.repo, collections=self.runName,
                                                    writeable=False)
        except OSError as e:
            raise RuntimeError(f"{self.repo} is not a Gen 3 repository") from e
    return self._analysisButler
def update_butler(event):
    global config
    global butler
    global registry
    try:
        config = repo_select.value.joinpath("butler.yaml")
        butler = dafButler.Butler(config=str(config))
        registry = butler.registry
        collections = list(registry.queryCollections())
        collection_select.options = collections
        collection_select.value = collections[0]
        debug_text.value = f"Successfully loaded butler from {config}."
    except Exception:
        debug_text.value = f"Failed to load Butler from {config}"
def testPrerequisiteLookupFunction(self):
    """This tests that a lookup function defined on a prerequisite input
    is called when building a quantum graph.
    """
    butler = dafButler.Butler(os.path.join(getPackageDir("ci_hsc_gen3"), "DATA", "butler.yaml"))
    pipeline = pipeBase.Pipeline("Test LookupFunction Pipeline")
    pipeline.addTask(LookupTestPipelineTask, "test")
    graphBuilder = pipeBase.GraphBuilder(butler.registry)
    graph = graphBuilder.makeGraph(pipeline, ["HSC/calib"], None, None)
    outputs = list(graph)
    # Verify the graph contains no datasetRefs for brighter-fatter kernels
    # instead of the datasetRefs that exist in the registry.
    numberOfInputs = len(outputs[0].quantum.inputs['bfKernel'])
    self.assertEqual(numberOfInputs, 0)
def testPipelineOnePairOnly(self):
    pipeCmd = writePipetaskCmd(
        self.repoDir,
        self.testRunName,
        self.instrument,
        self.collections,
        pipelineYaml=self.pipelineYaml,
    )
    pipeCmd += f" -d 'exposure IN ({self.visitNum}) and detector IN (191, 192)'"
    runProgram(pipeCmd)

    # Get Butler with updated registry
    self.butler = dafButler.Butler(self.repoDir)

    donutExtra = self.butler.get("donutStampsExtra", dataId=self.dataIdExtra,
                                 collections=[self.testRunName])
    donutIntra = self.butler.get("donutStampsIntra", dataId=self.dataIdIntra,
                                 collections=[self.testRunName])
    zernAvg = self.butler.get(
        "zernikeEstimateAvg",
        dataId=self.dataIdExtra,
        collections=[self.testRunName],
    )
    zernRaw = self.butler.get(
        "zernikeEstimateRaw",
        dataId=self.dataIdExtra,
        collections=[self.testRunName],
    )

    self.assertEqual(len(donutExtra), 2)
    self.assertEqual(len(donutExtra), len(donutIntra))
    self.assertEqual(np.shape(zernAvg), (19,))
    self.assertEqual(np.shape(zernRaw), (2, 19))

    self.badDataId = copy(self.dataIdExtra)
    self.badDataId["detector"] = 195
    with self.assertRaises(LookupError):
        self.butler.get(
            "donutStampsExtra",
            dataId=self.badDataId,
            collections=[self.testRunName],
        )
def update_butler(event):
    global config
    global butler
    global registry
    try:
        butler = dafButler.Butler(config=repo_config_string)
        registry = butler.registry
        collections = list(registry.queryCollections())
        collection_select.options = collections
        collection_select.value = collections[0]
        collection2_select.options = collections
        collection2_select.value = collections[0]
        debug_text.value = "Successfully loaded butler."
    except Exception as e:
        debug_text.value = f"Failed to load Butler: {str(e)}"
        log.error(f"{str(e)}")