class MatplotlibStorageTestCase(lsst.utils.tests.TestCase):
    """Round-trip a matplotlib figure through a Butler repository."""

    def setUp(self):
        # Output repository lives in a throw-away directory under tests/.
        inputDir = os.path.join(ROOT, "data", "input")
        prefix = type(self).__name__ + '-'
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'), prefix=prefix)
        outputs = {"root": self.testDir, "mode": 'rw'}
        self.butler = Butler(inputs=inputDir, outputs=outputs)

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testWriteFigure(self):
        """Test writing a matplotlib figure to a repository."""
        import matplotlib
        matplotlib.use("Agg")  # headless backend; must be set before pyplot import
        from matplotlib import pyplot

        fig = pyplot.figure()
        pyplot.plot([0, 1], [0, 1], "k")
        self.butler.put(fig, "test_plot", visit=1, filter="g")

        exists = self.butler.datasetExists("test_plot", visit=1, filter="g")
        self.assertTrue(exists)
        uri = self.butler.getUri("test_plot", visit=1, filter="g")
        self.assertTrue(os.path.exists(uri))
class MatplotlibStorageTestCase(lsst.utils.tests.TestCase):
    """Exercise persistence of matplotlib figures via the Butler."""

    def setUp(self):
        self.testDir = tempfile.mkdtemp(dir=os.path.join(ROOT, 'tests'),
                                        prefix=type(self).__name__ + '-')
        self.butler = Butler(inputs=os.path.join(ROOT, "data", "input"),
                             outputs={"root": self.testDir, "mode": 'rw'})

    def tearDown(self):
        del self.butler
        if os.path.exists(self.testDir):
            shutil.rmtree(self.testDir)

    def testWriteFigure(self):
        """Test writing a matplotlib figure to a repository."""
        # Force the non-interactive backend before pyplot is imported.
        import matplotlib
        matplotlib.use("Agg")
        from matplotlib import pyplot

        figure = pyplot.figure()
        pyplot.plot([0, 1], [0, 1], "k")
        dataId = dict(visit=1, filter="g")
        self.butler.put(figure, "test_plot", **dataId)
        self.assertTrue(self.butler.datasetExists("test_plot", **dataId))
        self.assertTrue(os.path.exists(self.butler.getUri("test_plot", **dataId)))
def prep(self):
    """Load moment-prior catalogs and prepare the prior for sampling.

    Collects every existing ``deepCoadd_momentPrior`` catalog for the
    configured tracts/patches/filter/label from the prior rerun, adds
    them to ``self.prior``, caches the 6x6 moment covariance from the
    first catalog, records the prior's flux/variance bounds, and
    computes the PQR for objects that fail selection
    (``self.noSelectPqr``).
    """
    self.prior = bfd.MomentPrior()
    priorFiles = []
    priorButler = Butler(self.config.priorRerun)
    prior_skyMap = priorButler.get('deepCoadd_skyMap')

    # Gather filenames of all prior catalogs that actually exist.
    for tract in self.config.priorTracts:
        for patchInfo in prior_skyMap[tract]:
            patch = '%d,%d' % patchInfo.getIndex()
            # Empty priorPatches means "use every patch".
            if self.config.priorPatches and patch not in self.config.priorPatches:
                continue
            if priorButler.datasetExists('deepCoadd_momentPrior',
                                         tract=tract, patch=patch,
                                         filter=self.config.priorFilter,
                                         label=self.config.priorLabel):
                priorFiles.append(
                    priorButler.get('deepCoadd_momentPrior_filename',
                                    tract=tract, patch=patch,
                                    filter=self.config.priorFilter,
                                    label=self.config.priorLabel)[0])

    max_file = len(priorFiles)
    if self.config.maxPriorFiles > 0:
        max_file = self.config.maxPriorFiles

    first = True
    for file in priorFiles[:max_file]:
        if file.find('_parent') > 0:
            self.log.info("Skipping %s, from parent" % file)
            continue
        self.log.info("Adding prior %s" % file)
        try:
            cat = lsst.afw.table.BaseCatalog.readFits(file)
            self.prior.addCatalog(cat, self.config.invariantCovariance,
                                  self.config.sampleFraction,
                                  self.config.sampleSeed)
            # Should be same for all prior catalogs, so only read once.
            if first:
                self.cov = numpy.array(
                    cat.getTable().getMetadata().getArrayDouble(
                        'COV')).reshape(6, 6)
                first = False
        except Exception as e:
            # Report through the task logger (not stdout) and keep going:
            # one unreadable catalog should not abort the whole prep.
            self.log.warn('Failed to read %s: %s' % (file, e))
            continue

    self.prior.prepare()
    self.fluxMin = self.prior.getFluxMin()
    self.fluxMax = self.prior.getFluxMax()
    self.varMin = self.prior.getVarMin()
    self.varMax = self.prior.getVarMax()

    selectionPqr = self.prior.selectionProbability(
        self.cov.astype(numpy.float32))
    # Probability of NOT being selected: complement the probability term
    # and negate the derivative terms.
    deselect = selectionPqr.copy()
    deselect[0] = 1 - selectionPqr[0]
    for i in range(1, 6):
        deselect[i] *= -1.
    self.noSelectPqr = deselect
def prep(self):
    """Build the KDTree template prior from persisted prior catalogs.

    Idempotent: returns immediately once ``self.initialized`` is set.
    The first readable catalog defines the reference configuration
    (flux/variance bounds, covariances, redshift cuts, sampling
    parameters) and is used to construct ``self.prior``; subsequent
    catalogs must match that configuration exactly or they are skipped.
    Every row of an accepted catalog is converted to a TemplateInfo and
    added to the prior.
    """
    if self.initialized:
        return
    self.prior = None
    priorFiles = []
    priorButler = Butler(self.config.priorRerun)
    prior_skyMap = priorButler.get('deepCoadd_skyMap')

    # Collect URIs of all existing prior catalogs.
    for tract in self.config.priorTracts:
        for patchInfo in prior_skyMap[tract]:
            patch = '%d,%d' % patchInfo.getIndex()
            # Empty priorPatches means "use every patch".
            if self.config.priorPatches and patch not in self.config.priorPatches:
                continue
            if priorButler.datasetExists('deepCoadd_prior',
                                         tract=tract, patch=patch,
                                         filter=self.config.priorFilter,
                                         label=self.config.priorLabel):
                priorFiles.append(
                    priorButler.getUri('deepCoadd_prior',
                                       tract=tract, patch=patch,
                                       filter=self.config.priorFilter,
                                       label=self.config.priorLabel))

    max_file = len(priorFiles)
    if self.config.maxPriorFiles > 0:
        max_file = self.config.maxPriorFiles

    self.zBin = None
    for file in priorFiles[:max_file]:
        if file.find('_parent') > 0:
            self.log.info("Skipping %s, from parent" % file)
            continue
        self.log.info("Adding prior %s" % file)
        try:
            cat = afwTable.BaseCatalog.readFits(file)
            md = cat.getTable().getMetadata().toDict()
            if self.prior is None:
                # First catalog: adopt its configuration and build the prior.
                self._initPriorFromMetadata(md)
            elif not self._metadataMatchesPrior(md):
                self.log.info('Skipping %s' % file)
                continue
            for s in cat:
                ti = self.bfd.TemplateInfo()
                ti.m = s.get('m')
                ti.dm = s.get('dm').reshape(self.bfd.BFDConfig.MSIZE,
                                            self.bfd.BFDConfig.DSIZE)
                ti.dxy = s.get('dxy').reshape(self.bfd.BFDConfig.XYSIZE,
                                              self.bfd.BFDConfig.DSIZE)
                ti.nda = s.get('nda')
                ti.id = s.get('bfd_id')
                self.prior.addTemplateInfo(ti)
        except Exception as e:
            # Report via the task logger (not stdout) and continue; a single
            # unreadable catalog should not abort preparation.
            self.log.warn('Failed to read %s: %s' % (file, e))
            continue

    self.prior.prepare()
    self.initialized = True

def _initPriorFromMetadata(self, md):
    """Adopt *md* as the reference configuration and construct the prior."""
    self.fluxMin = md['FLUXMIN']
    self.fluxMax = md['FLUXMAX']
    self.varMin = md['VARMIN']
    self.varMax = md['VARMAX']
    cov_even = np.array(md['COV_EVEN'])
    cov_odd = np.array(md['COV_ODD'])
    self.zMax = md['ZMAXCUT']
    self.zMin = md['ZMINCUT']
    self.noiseFactor = md['noiseFactor']
    self.priorSigmaCutoff = md['priorSigmaCutoff']
    self.priorSigmaStep = md['priorSigmaStep']
    self.priorSigmaBuffer = md['priorSigmaBuffer']
    self.nSample = md['NSAMPLE']
    self.selectionOnly = md['selectionOnly']
    self.invariantCovariance = md['invariantCovariance']
    covMat = self.bfd.MomentCov(cov_even.reshape(self.n_even, self.n_even),
                                cov_odd.reshape(self.n_odd, self.n_odd))
    self.prior = self.bfd.KDTreePrior(self.fluxMin, self.fluxMax, covMat,
                                      self.ud, self.nSample,
                                      self.selectionOnly, self.noiseFactor,
                                      self.priorSigmaStep,
                                      self.priorSigmaCutoff,
                                      self.priorSigmaBuffer,
                                      self.invariantCovariance)

def _metadataMatchesPrior(self, md):
    """Return True if *md* matches the reference configuration.

    Logs one message per mismatching field.  (The original code logged
    'does not match fluxMin' for a noiseFactor mismatch — a copy-paste
    bug; each message now names the field actually compared.)
    """
    checks = [
        ('fluxMin', md['FLUXMIN'], self.fluxMin),
        ('fluxMax', md['FLUXMAX'], self.fluxMax),
        ('varMin', md['VARMIN'], self.varMin),
        ('varMax', md['VARMAX'], self.varMax),
        ('zMin', md['ZMINCUT'], self.zMin),
        ('zMax', md['ZMAXCUT'], self.zMax),
        ('noiseFactor', md['noiseFactor'], self.noiseFactor),
        ('priorSigmaBuffer', md['priorSigmaBuffer'], self.priorSigmaBuffer),
        ('priorSigmaStep', md['priorSigmaStep'], self.priorSigmaStep),
        ('priorSigmaCutoff', md['priorSigmaCutoff'], self.priorSigmaCutoff),
        ('nSample', md['NSAMPLE'], self.nSample),
        ('selectionOnly', md['selectionOnly'], self.selectionOnly),
        ('invariantCovariance', md['invariantCovariance'],
         self.invariantCovariance),
    ]
    matches = True
    for name, actual, expected in checks:
        if actual != expected:
            self.log.info('does not match %s' % name)
            matches = False
    return matches
class ProductionTestCase(lsst.utils.tests.TestCase):
    """Validate the data products of a weekly production pipeline run."""

    def setUp(self):
        rerunPath = os.path.join(weeklyRerun, "pipeline", self.configuration, "pipeline")
        self.butler = Butler(rerunPath)
        visitGetters = dict(brn=getBrnVisits, bmn=getBmnVisits)
        self.visits = visitGetters[self.configuration]()
        self.design = PfsDesign.read(1, weeklyRaw)

    def tearDown(self):
        del self.butler

    def testVisitProducts(self):
        """Test that visit products exist"""
        for visit in self.visits:
            # Per-arm products: one spectrograph arm per configuration letter.
            for arm in self.configuration:
                for datasetType in ("pfsArm", "pfsArmLsf"):
                    self.assertTrue(self.butler.datasetExists(datasetType, visit=visit, arm=arm))
            for datasetType in ("pfsMerged", "pfsMergedLsf"):
                self.assertTrue(self.butler.datasetExists(datasetType, visit=visit))
            config = self.butler.get("pfsConfig", visit=visit)
            for target in config:
                if target.catId == -1:
                    # Not a real target that we've processed
                    continue
                for datasetType in ("pfsSingle", "pfsSingleLsf"):
                    self.assertTrue(self.butler.datasetExists(datasetType, target.identity,
                                                              visit=visit))

    def testObjectProducts(self):
        """Test that object products exist"""
        numVisits = len(self.visits)
        visitHash = calculatePfsVisitHash(self.visits)
        for target in self.design:
            if target.catId == -1:
                # Not a real target that we've processed
                continue
            dataId = target.identity.copy()
            dataId["nVisit"] = numVisits
            dataId["pfsVisitHash"] = visitHash
            for datasetType in ("pfsObject", "pfsObjectLsf"):
                self.assertTrue(self.butler.datasetExists(datasetType, dataId), msg=str(dataId))

    def testSpectra(self):
        """Test that spectra files can be read, and they are reasonable"""
        for visit in self.visits:
            config = self.butler.get("pfsConfig", visit=visit)
            spectra = self.butler.get("pfsMerged", visit=visit)
            lsf = self.butler.get("pfsMergedLsf", visit=visit)
            badMask = spectra.flags.get("NO_DATA", "BAD_FLAT", "INTRP")
            for fiberId, target in zip(config.fiberId, config):
                if target.catId == -1:
                    # Not a real target that we've processed
                    continue
                with self.subTest(visit=visit, fiberId=fiberId):
                    rows = np.where(spectra.fiberId == fiberId)[0]
                    maskArray = spectra.mask[rows]
                    good = (maskArray & badMask) == 0
                    self.assertGreater(good.sum(), 0.75*len(maskArray),
                                       "Too many masked pixels")
                    self.assertFalse(np.all(spectra.sky[rows][good] == 0))
                    self.assertTrue(np.all(spectra.variance[rows][good] > 0))
                    self.assertIn(fiberId, lsf)
                    self.assertIsInstance(lsf[fiberId], Lsf)

    def testObjects(self):
        """Test that object files can be read, and they are reasonable"""
        for target in self.design:
            if target.catId == -1:
                # Not a real target that we've processed
                continue
            with self.subTest(**target.identity):
                dataId = target.identity.copy()
                dataId.update(nVisit=len(self.visits),
                              pfsVisitHash=calculatePfsVisitHash(self.visits))
                spectrum = self.butler.get("pfsObject", dataId)
                lsf = self.butler.get("pfsObjectLsf", dataId)
                badMask = spectrum.flags.get("NO_DATA")
                good = (spectrum.mask & badMask) == 0
                minFrac = dict(brn=0.75, bmn=0.70)[self.configuration]
                self.assertGreater(good.sum(), minFrac*len(spectrum),
                                   "Too many masked pixels")
                self.assertFalse(np.all(spectrum.sky[good] == 0))
                self.assertTrue(np.all(spectrum.variance[good] > 0))
                self.assertIsInstance(lsf, Lsf)
def prep(self):
    """Load moment-prior catalogs, build the prior, and load redshifts.

    Collects every existing ``deepCoadd_momentPrior`` catalog for the
    configured tracts/patches/filter/label, adds them to ``self.prior``,
    and caches the moment covariance plus the zBin/fluxBin labels from
    the first catalog.  Optionally computes the no-selection PQR, and
    loads a redshift catalog into a cKDTree for positional matching.

    Raises
    ------
    Exception
        If no prior files are found.
    """
    self.prior = bfd.MomentPrior()
    priorFiles = []
    priorButler = Butler(self.config.priorRerun)
    prior_skyMap = priorButler.get('deepCoadd_skyMap')

    # Gather filenames of all prior catalogs that actually exist.
    for tract in self.config.priorTracts:
        for patchInfo in prior_skyMap[tract]:
            patch = '%d,%d' % patchInfo.getIndex()
            # Empty priorPatches means "use every patch".
            if self.config.priorPatches and patch not in self.config.priorPatches:
                continue
            if priorButler.datasetExists('deepCoadd_momentPrior',
                                         tract=tract, patch=patch,
                                         filter=self.config.priorFilter,
                                         label=self.config.priorLabel):
                priorFiles.append(
                    priorButler.get('deepCoadd_momentPrior_filename',
                                    tract=tract, patch=patch,
                                    filter=self.config.priorFilter,
                                    label=self.config.priorLabel)[0])
    if len(priorFiles) == 0:
        raise Exception('No Prior files found')

    max_file = len(priorFiles)
    if self.config.maxPriorFiles > 0:
        max_file = self.config.maxPriorFiles

    first = True
    self.zBin = None
    for file in priorFiles[:max_file]:
        if file.find('_parent') > 0:
            self.log.info("Skipping %s, from parent" % file)
            continue
        self.log.info("Adding prior %s" % file)
        try:
            cat = lsst.afw.table.BaseCatalog.readFits(file)
            self.prior.addCatalog(cat, self.config.invariantCovariance,
                                  self.config.sampleFraction,
                                  self.config.sampleSeed)
            # Should be same for all prior catalogs; read only once.
            if first:
                metadata = cat.getTable().getMetadata()
                self.cov = numpy.array(
                    metadata.getArrayDouble('COV')).reshape(6, 6)
                self.zBin = metadata.getInt('ZBIN')
                self.fluxBin = metadata.getInt('NOISEBIN')
                first = False
                self.log.info('Processing zbin: %d flux: %d' %
                              (self.zBin, self.fluxBin))
        except Exception as e:
            # Was a Python 2 print statement (a SyntaxError on Python 3);
            # report via the task logger and continue with the next file.
            self.log.warn('Failed to read %s: %s' % (file, e))
            continue

    self.log.info("Building tree from prior")
    self.prior.prepare()
    self.fluxMin = self.prior.getFluxMin()
    self.fluxMax = self.prior.getFluxMax()
    self.varMin = self.prior.getVarMin()
    self.varMax = self.prior.getVarMax()

    if self.config.writeNonSelect:
        self.selectionPqr = self.prior.selectionProbability(
            self.cov.astype(numpy.float32))
        # Probability of NOT being selected: complement the probability
        # term and negate the derivative terms.
        deselect = self.selectionPqr.copy()
        deselect[0] = 1 - self.selectionPqr[0]
        for i in range(1, 6):
            deselect[i] *= -1.
        self.noSelectPqr = deselect
        self.log.info('Probability of not being selected: %s' %
                      self.noSelectPqr)

    if self.config.readZFile is False:
        # NOTE(review): despite the flag name, redshifts are loaded when
        # readZFile is False -- confirm the intended semantics.
        self.log.info("Reading redshift file")
        zFile = pyfits.open(self.config.zFile)[1].data
        self.zRedshift = numpy.zeros(len(zFile))
        self.zId = zFile[self.config.zId]
        if self.config.useXY is False:
            zRa = zFile[self.config.zRa]*numpy.pi/180
            zDec = zFile[self.config.zDec]*numpy.pi/180
            if self.config.useAllZ:
                # Fall through the photo-z estimators in priority order,
                # filling only entries that are still unset (== 0.0).
                mask = zFile['frankenz_photoz_%s_isnull' % self.config.zField] == False
                self.zRedshift[mask] = zFile['frankenz_photoz_%s' % self.config.zField][mask]
                mask = (zFile['mizuki_photoz_%s_isnull' % self.config.zField] == False) & (self.zRedshift == 0.0)
                self.zRedshift[mask] = zFile['mizuki_photoz_%s' % self.config.zField][mask]
                mask = (zFile['nnpz_photoz_%s_isnull' % self.config.zField] == False) & (self.zRedshift == 0.0)
                self.zRedshift[mask] = zFile['nnpz_photoz_%s' % self.config.zField][mask]
                mask = (zFile['mlz_photoz_%s_isnull' % self.config.zField] == False) & (self.zRedshift == 0.0)
                self.zRedshift[mask] = zFile['mlz_photoz_%s' % self.config.zField][mask]
            else:
                mask = zFile['%s_photoz_%s_isnull' % (self.config.zType, self.config.zField)] == False
                self.zRedshift[mask] = zFile['%s_photoz_%s' % (self.config.zType, self.config.zField)][mask]
            # Unit vectors on the sphere: x = cos(dec)cos(ra),
            # y = cos(dec)sin(ra), z = sin(dec).  The previous code used
            # sin(dec) in all three components, which is not a unit
            # vector and distorts the KDTree chord distances.
            posRef = numpy.dstack([numpy.cos(zDec)*numpy.cos(zRa),
                                   numpy.cos(zDec)*numpy.sin(zRa),
                                   numpy.sin(zDec)])[0]
            self.log.info('Building redshift treee')
            self.zTree = scipy.spatial.cKDTree(posRef)
        else:
            # Planar coordinates: zRa/zDec config keys hold x/y columns.
            zX = zFile[self.config.zRa]
            zY = zFile[self.config.zDec]
            posRef = numpy.dstack([zX, zY])[0]
            self.zTree = scipy.spatial.cKDTree(posRef)
            self.zRedshift = zFile[self.config.zField]