コード例 #1
0
File: indexExposure.py  Project: lsst/daf_ingest
    def __call__(self, args):
        """Run the task on a single target.

        This implementation is nearly equivalent to the overridden one, but
        it never writes out metadata and always returns results. For memory
        efficiency reasons, the return value is exactly the one of |run|,
        rather than a :class:`~lsst.pipe.base.Struct` wrapped around it.

        @param args  Tuple of (data reference, keyword-argument dict) to
                     forward to the task's ``run`` method.
        @return Whatever ``task.run`` returns, or None if ``run`` raised and
                ``self.doRaise`` is False.
        """
        data_ref, kwargs = args
        if self.log is None:
            self.log = Log.getDefaultLogger()
        # Tag log output with the data id(s) being processed, when available.
        if hasattr(data_ref, "dataId"):
            self.log.MDC("LABEL", str(data_ref.dataId))
        elif isinstance(data_ref, (list, tuple)):
            self.log.MDC("LABEL", str([ref.dataId for ref in data_ref if hasattr(ref, "dataId")]))
        task = self.makeTask(args=args)
        result = None
        try:
            result = task.run(data_ref, **kwargs)
        # BUG FIX: "except Exception, e" is Python 2 syntax (SyntaxError on
        # Python 3); use the "as" form used everywhere else in this file.
        except Exception as e:
            if self.doRaise:
                raise
            if hasattr(data_ref, "dataId"):
                task.log.fatal("Failed on dataId=%s: %s" % (data_ref.dataId, e))
            elif isinstance(data_ref, (list, tuple)):
                task.log.fatal("Failed on dataId=[%s]: %s" %
                               (",".join([str(_.dataId) for _ in data_ref]), e))
            else:
                task.log.fatal("Failed on dataRef=%s: %s" % (data_ref, e))
            if not isinstance(e, pipe_base.TaskError):
                traceback.print_exc(file=sys.stderr)
        # BUG FIX: the docstring promises the result of run() is returned,
        # but no return statement was present; return it (None on failure).
        return result
コード例 #2
0
 def parse_args(self, config, args=None, log=None, override=None):
     """Parse command-line arguments into a pre-seeded Namespace.

     Seeds the namespace with the supplied config, a default logger (when
     none is given), and butler/clobber placeholders before delegating to
     the parent parser; the ``configfile`` attribute is stripped from the
     result before it is returned.
     """
     if args is None:
         args = sys.argv[1:]
     ns = Namespace()
     ns.config = config
     ns.clobberConfig = False
     ns.butler = None
     ns.log = Log.getDefaultLogger() if log is None else log
     ns = super(MeasurementDebuggerArgumentParser, self).parse_args(
         args=args, namespace=ns)
     del ns.configfile
     return ns
コード例 #3
0
def _poolFunctionWrapper(function, arg):
    """Wrapper around function to catch exceptions that don't inherit from Exception

    Such exceptions aren't caught by multiprocessing, which causes the slave
    process to crash and you end up hitting the timeout.
    """
    try:
        return function(arg)
    except Exception:
        # Ordinary exceptions propagate untouched; multiprocessing can
        # pickle and forward these to the master process.
        raise
    except:
        # Anything else (BaseException subclasses etc.) must be re-wrapped
        # in a plain Exception that multiprocessing will recognise.
        exc_type, exc_value, _ = sys.exc_info()
        logger = Log.getDefaultLogger()
        logger.warn("Unhandled exception %s (%s):\n%s" %
                    (exc_type.__name__, exc_value, traceback.format_exc()))
        raise Exception("Unhandled exception: %s (%s)" % (exc_type.__name__, exc_value))
コード例 #4
0
    def __init__(self, filenameList, healpix, nside):
        """!Constructor

        @param filenameList  List of filenames; the first entry is the
                             multiindex file, the remaining entries are the
                             individual index files
        @param healpix       Healpix number
        @param nside         Healpix nside
        """
        # A multiindex requires the multiindex file plus at least one index.
        if len(filenameList) < 2:
            raise RuntimeError("Insufficient filenames provided for multiindex (%s): expected >= 2" %
                               (filenameList,))
        self._healpix, self._nside = int(healpix), int(nside)
        self._filenameList = filenameList
        # The multiindex handle is loaded lazily; _loaded tracks that state.
        self._mi, self._loaded = None, False
        self.log = Log.getDefaultLogger()
コード例 #5
0
File: test_IndexExposure.py  Project: lsst/daf_ingest
 def test_basic(self):
     """Perform basic correctness testing."""
     ps = []
     # Construct property sets for two exposures centered on the equator
     for center in ((0.0, 0.0), (180.0, 0.0)):
         props = daf_base.PropertySet()
         props.add("NAXIS1", 9)
         props.add("NAXIS2", 9)
         props.add("RADECSYS", "ICRS")
         props.add("EQUINOX", 2000.0)
         props.add("CTYPE1", "RA---TAN")
         props.add("CTYPE2", "DEC--TAN")
         props.add("CRPIX1", 5.0)
         props.add("CRPIX2", 5.0)
         props.add("CRVAL1", center[0])
         props.add("CRVAL2", center[1])
         # Identity CD matrix: 1 degree per pixel.
         props.add("CD1_1", 1.0)
         props.add("CD2_1", 0.0)
         props.add("CD1_2", 0.0)
         props.add("CD2_2", 1.0)
         ps.append(props)
     # Retain one as is, and create an exposure from the other
     inputs = [ps[0], afw_image.ExposureF(8, 8, afw_image.makeWcs(ps[1]))]
     # Test data-ids are just integers.
     refs = [MockDataRef(i, v) for i, v in enumerate(inputs)]
     config = IndexExposureConfig()
     config.allow_replace = True
     config.defer_writes = True
     config.init_statements = ['PRAGMA page_size = 4096']
     # In-memory database so the test leaves nothing on disk.
     database = sqlite3.connect(":memory:")
     # Avoid the command line parser.
     parsed_cmd = pipe_base.Struct(
         config=config,
         log=Log.getDefaultLogger(),
         doraise=True,
         clobberConfig=False,
         noBackupConfig=False,
         database=database,
         dstype="bogus",
         id=pipe_base.Struct(refList=refs),
     )
     runner = IndexExposureRunner(IndexExposureTask, parsed_cmd)
     runner.run(parsed_cmd)
     # Re-ingest to test that allow_replace=True works. Toggle off
     # the deferred writes to test that as well.
     runner.config.defer_writes = False
     runner.run(parsed_cmd)
     # Re-ingest to test that allow_replace=False raises an exception.
     runner.config.allow_replace = False
     with self.assertRaises(Exception):
         runner.run(parsed_cmd)
     # Now, verify the contents of the database. First, check that
     # data ids are recoverable.
     # NOTE(review): str() on the fetched BLOB looks Python-2 era; on
     # Python 3 sqlite3 returns bytes here -- confirm this still unpickles.
     data_ids = sorted(
         pickle.loads(str(r[0]))
         for r in database.execute("SELECT pickled_data_id FROM exposure"))
     self.assertEqual(data_ids, [0, 1])
     # Next, run a spatial query and check that it returns the
     # expected results.
     center = sphgeom.UnitVector3d(sphgeom.LonLat.fromDegrees(4.0, 1.0))
     circle = sphgeom.Circle(center, sphgeom.Angle.fromDegrees(1.5))
     results = find_intersecting_exposures(database, circle)
     self.assertEqual(len(results), 1)
     info = results[0]
     # The first input exposure should have been returned, and
     # should intersect the query region
     self.assertEqual(info.data_id, 0)
     self.assertEqual(circle.relate(info.boundary), sphgeom.INTERSECTS)
     database.close()
コード例 #6
0
    def readSrc(self, dataRef):
        """Read source catalog etc for input dataRef

        The following are returned:
        Source catalog, matched list, and wcs will be read from 'src', 'srcMatch', and 'calexp_md',
        respectively.

        NOTE: If the detector has nQuarter%4 != 0 (i.e. it is rotated w.r.t the focal plane
              coordinate system), the (x, y) pixel values of the centroid slot for the source
              catalogs are rotated such that pixel (0, 0) is the LLC (i.e. the coordinate system
              expected by meas_mosaic).

        If color transformation information is given, it will be applied to the reference flux
        of the matched list.  The source catalog and matched list will be converted to measMosaic's
        Source and SourceMatch and returned.

        The number of 'Source's in each cell defined by config.cellSize will be limited to brightest
        config.nStarPerCell.
        """

        # NOTE(review): this unconditionally replaces any previously configured
        # task logger with the default logger on every call -- confirm intended.
        self.log = Log.getDefaultLogger()

        dataId = dataRef.dataId

        try:
            if not dataRef.datasetExists("src"):
                raise RuntimeError("no data for src %s" % (dataId))
            if not dataRef.datasetExists("calexp_md"):
                raise RuntimeError("no data for calexp_md %s" % (dataId))

            calexp_md = dataRef.get("calexp_md", immediate=True)
            detector = dataRef.get("camera")[dataRef.dataId["ccd"]]  # OK for HSC; maybe not for other cameras
            wcs = afwGeom.makeSkyWcs(calexp_md)
            nQuarter = detector.getOrientation().getNQuarter()
            sources = dataRef.get("src", immediate=True, flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)

            # Check if we are looking at HSC stack outputs: if so, no pixel rotation of sources is
            # required, but alias mapping must be set to associate HSC's schema with that of LSST.
            hscRun = mosaicUtils.checkHscStack(calexp_md)
            if hscRun is None:
                if nQuarter%4 != 0:
                    dims = afwImage.bboxFromMetadata(calexp_md).getDimensions()
                    sources = mosaicUtils.rotatePixelCoords(sources, dims.getX(), dims.getY(),
                                                            nQuarter)

            # Set some alias maps for the source catalog where needed for
            # backwards compatibility
            if self.config.srcSchemaMap and hscRun:
                aliasMap = sources.schema.getAliasMap()
                for lsstName, otherName in self.config.srcSchemaMap.items():
                    aliasMap.set(lsstName, otherName)
            if self.config.flagsToAlias and "calib_psfUsed" in sources.schema:
                aliasMap = sources.schema.getAliasMap()
                for lsstName, otherName in self.config.flagsToAlias.items():
                    aliasMap.set(lsstName, otherName)

            refObjLoader = self.config.loadAstrom.apply(butler=dataRef.getButler())
            srcMatch = dataRef.get("srcMatch", immediate=True)
            if hscRun is not None:
                # The reference object loader grows the bbox by the config parameter pixelMargin.  This
                # is set to 50 by default but is not reflected by the radius parameter set in the
                # metadata, so some matches may reside outside the circle searched within this radius
                # Thus, increase the radius set in the metadata fed into joinMatchListWithCatalog() to
                # accommodate.
                matchmeta = srcMatch.table.getMetadata()
                rad = matchmeta.getDouble("RADIUS")
                matchmeta.setDouble("RADIUS", rad*1.05, "field radius in degrees, approximate, padded")
            matches = refObjLoader.joinMatchListWithCatalog(srcMatch, sources)

            # Set the alias map for the matched sources (i.e. the [1] attribute schema for each match)
            if self.config.srcSchemaMap is not None and hscRun is not None:
                for mm in matches:
                    aliasMap = mm[1].schema.getAliasMap()
                    for lsstName, otherName in self.config.srcSchemaMap.items():
                        aliasMap.set(lsstName, otherName)

            if hscRun is not None:
                for slot in ("PsfFlux", "ModelFlux", "ApFlux", "GaussianFlux", "Centroid", "Shape"):
                    getattr(matches[0][1].getTable(), "define" + slot)(
                        getattr(sources, "get" + slot + "Definition")())
                    # For some reason, the CalibFlux slot in sources is coming up as centroid_sdss, so
                    # set it to flux_naive explicitly
                    # NOTE(review): this inner loop is nested inside the outer slot
                    # loop, so it re-runs once per outer slot; harmless but redundant.
                    for slot in ("CalibFlux", ):
                        getattr(matches[0][1].getTable(), "define" + slot)("flux_naive")
            matches = [m for m in matches if m[0] is not None]
            refSchema = matches[0][0].schema if matches else None

            if self.cterm is not None and len(matches) != 0:
                # Add a "flux" field to the input schema of the first element
                # of the match and populate it with a colorterm correct flux.
                mapper = afwTable.SchemaMapper(refSchema)
                for key, field in refSchema:
                    mapper.addMapping(key)
                fluxKey = mapper.editOutputSchema().addField("flux", type=float, doc="Reference flux")
                fluxErrKey = mapper.editOutputSchema().addField("fluxErr", type=float,
                                                                  doc="Reference flux uncertainty")
                table = afwTable.SimpleTable.make(mapper.getOutputSchema())
                table.preallocate(len(matches))
                for match in matches:
                    newMatch = table.makeRecord()
                    newMatch.assign(match[0], mapper)
                    match[0] = newMatch

                # extract the matched refCat as a Catalog for the colorterm code
                refCat = afwTable.SimpleCatalog(matches[0].first.schema)
                refCat.reserve(len(matches))
                for x in matches:
                    record = refCat.addNew()
                    record.assign(x.first)

                refMag, refMagErr = self.cterm.getCorrectedMagnitudes(refCat,
                                                                      afwImage.Filter(calexp_md).getName())
                # NOTE: mosaic assumes fluxes are in Jy
                refFlux = (refMag*astropy.units.ABmag).to_value(astropy.units.Jy)
                refFluxErr = afwImage.fluxErrFromABMagErr(refMagErr, refMag)
                # "flux == flux" is False only for NaN, so this drops NaN fluxes.
                matches = [self.setCatFlux(m, flux, fluxKey, fluxErr, fluxErrKey) for
                           m, flux, fluxErr in zip(matches, refFlux, refFluxErr) if flux == flux]
            else:
                filterName = afwImage.Filter(calexp_md).getName()
                refFluxField = measAlg.getRefFluxField(refSchema, filterName)
                refSchema.getAliasMap().set("flux", refFluxField)

            # LSST reads in a_net catalogs with flux in "janskys", so must convert back to DN.
            matches = mosaicUtils.matchJanskyToDn(matches)

            selSources = self.selectStars(sources, self.config.includeSaturated)
            selMatches = self.selectStars(matches, self.config.includeSaturated)

            retSrc = list()
            retMatch = list()

            if len(selMatches) > self.config.minNumMatch:
                naxis1, naxis2 = afwImage.bboxFromMetadata(calexp_md).getDimensions()
                if hscRun is None:
                    if nQuarter%2 != 0:
                        naxis1, naxis2 = naxis2, naxis1
                bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(naxis1, naxis2))
                cellSet = afwMath.SpatialCellSet(bbox, self.config.cellSize, self.config.cellSize)
                for s in selSources:
                    if numpy.isfinite(s.getRa().asDegrees()): # get rid of NaN
                        src = measMosaic.Source(s)
                        src.setExp(dataId["visit"])
                        src.setChip(dataId["ccd"])
                        try:
                            tmp = measMosaic.SpatialCellSource(src)
                            cellSet.insertCandidate(tmp)
                        except:
                            self.log.info("FAILED TO INSERT CANDIDATE: visit=%d ccd=%d x=%f y=%f" %
                                          (dataRef.dataId["visit"], dataRef.dataId["ccd"],
                                           src.getX(), src.getY()) + " bbox=" + str(bbox))
                for cell in cellSet.getCellList():
                    cell.sortCandidates()
                    # Keep only the brightest nStarPerCell candidates per cell.
                    for i, cand in enumerate(cell):
                        src = cand.getSource()
                        retSrc.append(src)
                        if i == self.config.nStarPerCell - 1:
                            break
                for m in selMatches:
                    if m[0] is not None and m[1] is not None:
                        match = (measMosaic.Source(m[0], wcs), measMosaic.Source(m[1]))
                        match[1].setExp(dataId["visit"])
                        match[1].setChip(dataId["ccd"])
                        retMatch.append(match)
            else:
                self.log.info("%8d %3d : %d/%d matches  Suspicious to wrong match. Ignore this CCD" %
                              (dataRef.dataId["visit"], dataRef.dataId["ccd"], len(selMatches), len(matches)))

        except Exception as e:
            self.log.warn("Failed to read %s: %s" % (dataId, e))
            return dataId, [None, None, None]

        return dataId, [retSrc, retMatch, wcs]
コード例 #7
0
def run(visit, rerun, config):
    """Run calibration and photometry for a single visit/rerun.

    Reads the 'visitim' exposure via the butler, runs MyCalibrate and
    pipPhot.Photometry on it, prints diagnostics for the resulting sources
    and footprints, writes the PSF/sources outputs, and stores the footprint
    bounding boxes under the 'bb' dataset.

    @param visit   Visit identifier (goes into the data id)
    @param rerun   Rerun identifier (goes into the data id)
    @param config  Configuration passed to the I/O and measurement tasks
    @return list of footprint bounding boxes as (minX, minY, maxX, maxY) tuples
    """
    mapper = getMapper()
    dataId = {'visit': visit, 'rerun': rerun}
    rrdir = mapper.getPath('outdir', dataId)
    if not os.path.exists(rrdir):
        print('Creating directory for ouputs:', rrdir)
        os.makedirs(rrdir)
    else:
        print('Output directory:', rrdir)
    io = pipReadWrite.ReadWrite(mapper, ['visit'], config=config)
    # ccdProc = pipProcCcd.ProcessCcd(config=config, Isr=NullISR, Calibrate=MyCalibrate)
    # raws = io.readRaw(dataId)
    # detrends = io.detrends(dataId, config)
    print('Reading exposure')
    # exposure = io.read('visitim', dataId)
    exposure = io.inButler.get('visitim', dataId)
    print('exposure is', exposure)
    print('size', exposure.getWidth(), 'x', exposure.getHeight())
    # debug
    # mi = exposure.getMaskedImage()
    # img = mi.getImage()
    # var = mi.getVariance()
    # print('var at 90,100 is', var.get(90,100))
    # print('img at 90,100 is', img.get(90,100))
    # print('wcs is', exposure.getWcs())
    wcs = exposure.getWcs()
    assert wcs
    # print('ccdProc.run()...')
    # raws = [exposure]
    # exposure, psf, apcorr, brightSources, sources, matches, matchMeta = ccdProc.run(raws, detrends)
    print('Calibrate()...')
    log = Log.getDefaultLogger()
    cal = MyCalibrate(config=config, log=log, Photometry=MyPhotometry)
    psf, sources, footprints = cal.run2(exposure)

    print('Photometry()...')
    phot = pipPhot.Photometry(config=config, log=log)
    # NOTE(review): this re-runs photometry and overwrites the sources and
    # footprints produced by cal.run2 above -- confirm that is intended.
    sources, footprints = phot.run(exposure, psf)
    print('sources:', len(sources))
    for s in sources:
        print('  ', s, s.getXAstrom(), s.getYAstrom(), s.getPsfFlux(),
              s.getIxx(), s.getIyy(), s.getIxy())

    print('footprints:', footprints)
    # oh yeah, baby!
    fps = footprints.getFootprints()
    print(len(fps))
    # Collect each footprint's bounding box as a plain tuple for output.
    bb = []
    for f in fps:
        print('  Footprint', f)
        print('  ', f.getBBox())
        bbox = f.getBBox()
        bb.append(
            (bbox.getMinX(), bbox.getMinY(), bbox.getMaxX(), bbox.getMaxY()))
        print('   # peaks:', len(f.getPeaks()))
        for p in f.getPeaks():
            print('    Peak', p)
    # print('psf', psf)
    # print('sources', sources)
    # print('footprints', footprints)
    # psf, apcorr, brightSources, matches, matchMeta = self.calibrate(exposure, defects=defects)
    # if self.config['do']['phot']:
    #     sources, footprints = self.phot(exposure, psf, apcorr, wcs=exposure.getWcs())
    # psf, wcs = self.fakePsf(exposure)
    # sources, footprints = self.phot(exposure, psf)
    # sources = self.rephot(exposure, footprints, psf, apcorr=apcorr)
    # model = calibrate['model']
    # fwhm = calibrate['fwhm'] / wcs.pixelScale()
    # size = calibrate['size']
    #  psf = afwDet.createPsf(model, size, size, fwhm/(2*math.sqrt(2*math.log(2))))
    # print('done!')
    print('writing output...')
    io.write(dataId, psf=psf, sources=sources)
    print('done!')
    print('Writing bounding-boxes...')
    io.outButler.put(bb, 'bb', dataId)

    # print('Writing footprints...')
    # io.outButler.put(fps, 'footprints', dataId)

    # serialize a python version of footprints & peaks;
    # commented out because footprintsToPython does not exist
    # pyfoots = footprintsToPython(fps)
    # print('Writing py footprints...')
    # io.outButler.put(pyfoots, 'pyfoots', dataId)

    return bb
コード例 #8
0
def main():
    """Query an astrometry.net index for reference sources around RA,Dec.

    Command-line arguments are RA Dec radius (degrees). Prints the reference
    sources and their tag-along columns as space-separated values to stdout,
    or, when -o FILE is given, writes them to a FITS binary table.

    @return 0 on success, -1 on bad command-line arguments
    """
    from optparse import OptionParser

    # BUG FIX: optparse substitutes the program name for '%prog';
    # '%(program)' is not recognised and was printed literally.
    parser = OptionParser(usage='%prog [args] RA Dec radius')
    parser.add_option('-o',
                      dest='outfn',
                      help='FITS table output filename',
                      default=None)
    (opt, args) = parser.parse_args()

    if len(args) != 3:
        parser.print_help()
        return -1

    ra = float(args[0])
    dec = float(args[1])
    radius = float(args[2])

    log = Log.getDefaultLogger()
    log.setLevel(Log.DEBUG)
    pol = policy.Policy()
    pol.set('matchThreshold', 30)
    solver = measAstrom.createSolver(pol, log)

    solver.setLogLevel(3)

    ids = solver.getIndexIdList()
    print('Index IDs:', ids)
    # Only the first index is queried.
    indexid = ids[0]

    idName = 'id'
    X = solver.getCatalogue(ra * afwGeom.degrees, dec * afwGeom.degrees,
                            radius * afwGeom.degrees, '', idName, indexid)
    ref = X.refsources
    inds = X.inds
    print('Got', len(ref), 'reference catalog sources')
    print('  got indices:', len(inds))

    print('Tag-along columns:')
    cols = solver.getTagAlongColumns(indexid)
    for c in cols:
        print('  column:', c.name, c.fitstype, c.ctype, c.units, c.arraysize)

    # Fetch each tag-along column via its type-specific accessor,
    # e.g. getTagAlongDouble for ctype 'Double'.
    tagdata = []
    for c in cols:
        fname = 'getTagAlong' + c.ctype
        func = getattr(solver, fname)
        data = func(indexid, c.name, inds)
        tagdata.append(data)

    if opt.outfn is None:
        # Space-separated output: header row, then one line per source.
        print('ra dec', end=' ')
        for c in cols:
            if c.arraysize > 1:
                # BUG FIX: "for a in len(...)" iterates an int and raises
                # TypeError; range() is required.
                for a in range(c.arraysize):
                    print(('%s_%i' % (c.name, a)), end=' ')
            else:
                print(c.name, end=' ')
        print()

        for i, r in enumerate(ref):
            print(r.getRa().asDegrees(), r.getDec().asDegrees(), end=' ')
            for c, d in zip(cols, tagdata):
                if c.arraysize > 1:
                    # Array columns are stored flattened; slice out row i.
                    # BUG FIX: range() here as well (was "in len(...)").
                    for a in range(c.arraysize):
                        print(d[c.arraysize * i + a], end=' ')
                else:
                    print(d[i], end=' ')
            print()

    else:
        import pyfits
        import numpy as np

        fitscols = []
        fitscols.append(
            pyfits.Column(name='RA',
                          array=np.array([r.getRa().asDegrees() for r in ref]),
                          format='D',
                          unit='deg'))
        fitscols.append(
            pyfits.Column(name='DEC',
                          array=np.array([r.getDec().asDegrees()
                                          for r in ref]),
                          format='D',
                          unit='deg'))
        # Map astrometry.net C types to FITS binary-table format codes;
        # hoisted out of the loop (it is loop-invariant).
        fmap = {
            'Int64': 'K',
            'Int': 'J',
            'Bool': 'L',
            'Double': 'D',
        }
        for c, d in zip(cols, tagdata):
            if c.arraysize > 1:
                # May have to reshape the array as well...
                fitscols.append(
                    pyfits.Column(name=c.name,
                                  array=np.array(d),
                                  format='%i%s' %
                                  (c.arraysize, fmap.get(c.ctype, 'D'))))
            else:
                fitscols.append(
                    pyfits.Column(name=c.name,
                                  array=np.array(d),
                                  format=fmap.get(c.ctype, 'D')))

        pyfits.new_table(fitscols).writeto(opt.outfn, clobber=True)
        print('Wrote FITS table', opt.outfn)

    return 0
コード例 #9
0
    def readSrc(self, dataRef):
        """Read source catalog etc for input dataRef

        The following are returned:
        Source catalog, matched list, and wcs will be read from 'src', 'srcMatch', and 'calexp_md',
        respectively.

        NOTE: If the detector has nQuarter%4 != 0 (i.e. it is rotated w.r.t the focal plane
              coordinate system), the (x, y) pixel values of the centroid slot for the source
              catalogs are rotated such that pixel (0, 0) is the LLC (i.e. the coordinate system
              expected by meas_mosaic).

        If color transformation information is given, it will be applied to the reference flux
        of the matched list.  The source catalog and matched list will be converted to measMosaic's
        Source and SourceMatch and returned.

        The number of 'Source's in each cell defined by config.cellSize will be limited to brightest
        config.nStarPerCell.
        """

        # NOTE(review): this unconditionally replaces any previously configured
        # task logger with the default logger on every call -- confirm intended.
        self.log = Log.getDefaultLogger()

        dataId = dataRef.dataId

        try:
            if not dataRef.datasetExists("src"):
                raise RuntimeError("no data for src %s" % (dataId))
            if not dataRef.datasetExists("calexp_md"):
                raise RuntimeError("no data for calexp_md %s" % (dataId))

            calexp_md = dataRef.get("calexp_md", immediate=True)
            detector = dataRef.get("camera")[dataRef.dataId[
                "ccd"]]  # OK for HSC; maybe not for other cameras
            wcs = afwGeom.makeSkyWcs(calexp_md)
            nQuarter = detector.getOrientation().getNQuarter()
            sources = dataRef.get("src",
                                  immediate=True,
                                  flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)

            # Check if we are looking at HSC stack outputs: if so, no pixel rotation of sources is
            # required, but alias mapping must be set to associate HSC's schema with that of LSST.
            hscRun = mosaicUtils.checkHscStack(calexp_md)
            if hscRun is None:
                if nQuarter % 4 != 0:
                    dims = afwImage.bboxFromMetadata(calexp_md).getDimensions()
                    sources = mosaicUtils.rotatePixelCoords(
                        sources, dims.getX(), dims.getY(), nQuarter)

            # Set the alias map for the source catalog
            if self.config.srcSchemaMap is not None and hscRun is not None:
                aliasMap = sources.schema.getAliasMap()
                for lsstName, otherName in self.config.srcSchemaMap.items():
                    aliasMap.set(lsstName, otherName)

            refObjLoader = self.config.loadAstrom.apply(
                butler=dataRef.getButler())
            srcMatch = dataRef.get("srcMatch", immediate=True)
            if hscRun is not None:
                # The reference object loader grows the bbox by the config parameter pixelMargin.  This
                # is set to 50 by default but is not reflected by the radius parameter set in the
                # metadata, so some matches may reside outside the circle searched within this radius
                # Thus, increase the radius set in the metadata fed into joinMatchListWithCatalog() to
                # accommodate.
                matchmeta = srcMatch.table.getMetadata()
                rad = matchmeta.getDouble("RADIUS")
                matchmeta.setDouble(
                    "RADIUS", rad * 1.05,
                    "field radius in degrees, approximate, padded")
            matches = refObjLoader.joinMatchListWithCatalog(srcMatch, sources)

            # Set the alias map for the matched sources (i.e. the [1] attribute schema for each match)
            if self.config.srcSchemaMap is not None and hscRun is not None:
                for mm in matches:
                    aliasMap = mm[1].schema.getAliasMap()
                    for lsstName, otherName in self.config.srcSchemaMap.items(
                    ):
                        aliasMap.set(lsstName, otherName)

            if hscRun is not None:
                for slot in ("PsfFlux", "ModelFlux", "ApFlux", "InstFlux",
                             "Centroid", "Shape"):
                    getattr(matches[0][1].getTable(), "define" + slot)(getattr(
                        sources, "get" + slot + "Definition")())
                    # For some reason, the CalibFlux slot in sources is coming up as centroid_sdss, so
                    # set it to flux_naive explicitly
                    # NOTE(review): this inner loop is nested inside the outer slot
                    # loop, so it re-runs once per outer slot; harmless but redundant.
                    for slot in ("CalibFlux", ):
                        getattr(matches[0][1].getTable(),
                                "define" + slot)("flux_naive")
            matches = [m for m in matches if m[0] is not None]
            refSchema = matches[0][0].schema if matches else None

            if self.cterm is not None and len(matches) != 0:
                # Add a "flux" field to the input schema of the first element
                # of the match and populate it with a colorterm correct flux.
                mapper = afwTable.SchemaMapper(refSchema)
                for key, field in refSchema:
                    mapper.addMapping(key)
                fluxKey = mapper.editOutputSchema().addField(
                    "flux", type=float, doc="Reference flux")
                fluxSigmaKey = mapper.editOutputSchema().addField(
                    "fluxSigma", type=float, doc="Reference flux uncertainty")
                table = afwTable.SimpleTable.make(mapper.getOutputSchema())
                table.preallocate(len(matches))
                for match in matches:
                    newMatch = table.makeRecord()
                    newMatch.assign(match[0], mapper)
                    match[0] = newMatch
                primaryFluxKey = refSchema.find(
                    refSchema.join(self.cterm.primary, "flux")).key
                secondaryFluxKey = refSchema.find(
                    refSchema.join(self.cterm.secondary, "flux")).key
                primaryFluxSigmaKey = refSchema.find(
                    refSchema.join(self.cterm.primary, "fluxSigma")).key
                secondaryFluxSigmaKey = refSchema.find(
                    refSchema.join(self.cterm.secondary, "fluxSigma")).key
                refFlux1 = numpy.array(
                    [m[0].get(primaryFluxKey) for m in matches])
                refFlux2 = numpy.array(
                    [m[0].get(secondaryFluxKey) for m in matches])
                refFluxSigma1 = numpy.array(
                    [m[0].get(primaryFluxSigmaKey) for m in matches])
                refFluxSigma2 = numpy.array(
                    [m[0].get(secondaryFluxSigmaKey) for m in matches])
                refMag1 = -2.5 * numpy.log10(refFlux1)
                refMag2 = -2.5 * numpy.log10(refFlux2)
                refMag = self.cterm.transformMags(refMag1, refMag2)
                refFlux = numpy.power(10.0, -0.4 * refMag)
                refFluxSigma = self.cterm.propagateFluxErrors(
                    refFluxSigma1, refFluxSigma2)
                # "flux == flux" is False only for NaN, so this drops NaN fluxes.
                matches = [
                    self.setCatFlux(m, flux, fluxKey, fluxSigma, fluxSigmaKey)
                    for m, flux, fluxSigma in zip(matches, refFlux,
                                                  refFluxSigma) if flux == flux
                ]
            else:
                filterName = afwImage.Filter(calexp_md).getName()
                refFluxField = measAlg.getRefFluxField(refSchema, filterName)
                refSchema.getAliasMap().set("flux", refFluxField)

            # LSST reads in a_net catalogs with flux in "janskys", so must convert back to DN.
            matches = mosaicUtils.matchJanskyToDn(matches)

            selSources = self.selectStars(sources,
                                          self.config.includeSaturated)
            selMatches = self.selectStars(matches,
                                          self.config.includeSaturated)

            retSrc = list()
            retMatch = list()

            if len(selMatches) > self.config.minNumMatch:
                naxis1, naxis2 = afwImage.bboxFromMetadata(
                    calexp_md).getDimensions()
                if hscRun is None:
                    if nQuarter % 2 != 0:
                        naxis1, naxis2 = naxis2, naxis1
                bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0),
                                     afwGeom.Extent2I(naxis1, naxis2))
                cellSet = afwMath.SpatialCellSet(bbox, self.config.cellSize,
                                                 self.config.cellSize)
                for s in selSources:
                    if numpy.isfinite(s.getRa().asDegrees()):  # get rid of NaN
                        src = measMosaic.Source(s)
                        src.setExp(dataId["visit"])
                        src.setChip(dataId["ccd"])
                        try:
                            tmp = measMosaic.SpatialCellSource(src)
                            cellSet.insertCandidate(tmp)
                        except:
                            self.log.info(
                                "FAILED TO INSERT CANDIDATE: visit=%d ccd=%d x=%f y=%f"
                                % (dataRef.dataId["visit"], dataRef.
                                   dataId["ccd"], src.getX(), src.getY()) +
                                " bbox=" + str(bbox))
                for cell in cellSet.getCellList():
                    cell.sortCandidates()
                    # Keep only the brightest nStarPerCell candidates per cell.
                    for i, cand in enumerate(cell):
                        src = cand.getSource()
                        retSrc.append(src)
                        if i == self.config.nStarPerCell - 1:
                            break
                for m in selMatches:
                    if m[0] is not None and m[1] is not None:
                        match = (measMosaic.Source(m[0], wcs),
                                 measMosaic.Source(m[1]))
                        match[1].setExp(dataId["visit"])
                        match[1].setChip(dataId["ccd"])
                        retMatch.append(match)
            else:
                self.log.info(
                    "%8d %3d : %d/%d matches  Suspicious to wrong match. Ignore this CCD"
                    % (dataRef.dataId["visit"], dataRef.dataId["ccd"],
                       len(selMatches), len(matches)))

        except Exception as e:
            self.log.warn("Failed to read %s: %s" % (dataId, e))
            return dataId, [None, None, None]

        return dataId, [retSrc, retMatch, wcs]
コード例 #10
0
def plotsForField(inButler, keys, fixup, plots=None, prefix=''):
    """Produce diagnostic plots for a single sensor/visit.

    Parameters
    ----------
    inButler : butler used to fetch sources, matches and the calexp.
    keys : dict of data-id keys; must include 'visit', 'raft', 'sensor'.
    fixup : bool; if True apply the source-id mask/offset workaround
        (see inline comment) when regenerating matches.
    plots : list of plot names to produce; defaults to
        ['photom', 'matches', 'corr', 'distortion'].
    prefix : str; prefix for output plot filenames.
    """
    if plots is None:
        plots = ['photom', 'matches', 'corr', 'distortion']

    filters = inButler.queryMetadata('raw', 'filter', **keys)
    print('Filters:', filters)
    filterName = filters[0]

    # The butler may evaluate lazily, so a missing dataset can only be
    # detected by attempting the get(); keep it INSIDE the try so the
    # fallback to 'src' actually triggers.  Catch Exception, not bare except.
    try:
        psources = inButler.get('icSrc', **keys)
        print('Got sources', psources)
    except Exception:
        print('"icSrc" not found.  Trying "src" instead.')
        psources = inButler.get('src', **keys)
        print('Got sources', psources)

    pmatches = inButler.get('icMatch', **keys)
    sources = psources.getSources()

    calexp = inButler.get('calexp', **keys)
    wcs = calexp.getWcs()

    photocal = calexp.getCalib()
    zp = photocal.getMagnitude(1.)
    print('Zeropoint is', zp)

    # ref sources
    W, H = calexp.getWidth(), calexp.getHeight()

    log = Log.getDefaultLogger()
    log.setLevel(Log.DEBUG)

    kwargs = {}
    if fixup:
        # ugh, mask and offset req'd because source ids are assigned at write-time
        # and match list code made a deep copy before that.
        # (see svn+ssh://svn.lsstcorp.org/DMS/meas/astrom/tickets/1491-b r18027)
        kwargs['sourceIdMask'] = 0xffff
        kwargs['sourceIdOffset'] = -1

    (matches, ref) = measAstrom.generateMatchesFromMatchList(
        pmatches, sources, wcs, W, H, returnRefs=True, log=log, **kwargs)
    print('Got', len(ref), 'reference catalog sources')

    # pull 'stargal' and 'referrs' arrays out of the reference sources
    fdict = maUtils.getDetectionFlags()
    starflag = int(fdict["STAR"])
    stargal = [bool((r.getFlagForDetection() & starflag) > 0)
               for r in ref]
    # flux error -> magnitude error: dm = 2.5/ln(10) * dF/F
    referrs = [float(r.getPsfFluxErr() / r.getPsfFlux() * 2.5 / -np.log(10))
               for r in ref]
    nstars = sum([1 for s in stargal if s])
    print('Number of sources with STAR set:', nstars)

    visit = keys['visit']
    raft = keys['raft']
    sensor = keys['sensor']
    prefix += 'imsim-v%i-r%s-s%s' % (visit, raft.replace(',', ''), sensor.replace(',', ''))

    if 'photom' in plots:
        print('photometry plots...')
        tt = 'LSST ImSim v%i r%s s%s' % (visit, raft.replace(',', ''), sensor.replace(',', ''))

        wcsPlots.plotPhotometry(sources, ref, matches, prefix, band=filterName,
                                zp=zp, referrs=referrs, refstargal=stargal, title=tt)
        wcsPlots.plotPhotometry(sources, ref, matches, prefix, band=filterName, zp=zp,
                                delta=True, referrs=referrs, refstargal=stargal, title=tt)

        # test w/ and w/o referrs and stargal.
        if False:
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'A', band=filterName, zp=zp, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'B',
                                    band=filterName, zp=zp, referrs=referrs, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'C',
                                    band=filterName, zp=zp, refstargal=stargal, title=tt)

            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'A',
                                    band=filterName, zp=zp, delta=True, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'B', band=filterName,
                                    zp=zp, delta=True, referrs=referrs, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'C', band=filterName,
                                    zp=zp, delta=True, refstargal=stargal, title=tt)

    if 'matches' in plots:
        print('matches...')
        wcsPlots.plotMatches(sources, ref, matches, wcs, W, H, prefix)

    if 'distortion' in plots:
        print('distortion...')
        wcsPlots.plotDistortion(wcs, W, H, 400, prefix,
                                'SIP Distortion (exaggerated x 10)', exaggerate=10.)
        print('distortion...')
        wcsPlots.plotDistortion(wcs, W, H, 400, prefix,
                                'SIP Distortion (exaggerated x 100)', exaggerate=100.,
                                suffix='-distort2.')
コード例 #11
0
# the GNU General Public License along with this program.  If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#

import math
import sys
import os
import time

import lsst.utils
import lsst.geom
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
from lsst.log import Log

# Default to INFO-level logging, but enable DEBUG tracing for the
# convolution routines specifically.
Log.getDefaultLogger().setLevel(Log.INFO)
Log.getLogger("TRACE2.afw.math.convolve").setLevel(Log.DEBUG)

MaxIter = 20  # presumably caps the timing-loop iteration count -- confirm in driver below
MaxTime = 1.0  # seconds

# Location of test data; requires the afwdata package to be set up.
afwdataDir = lsst.utils.getPackageDir("afwdata")

InputMaskedImagePath = os.path.join(afwdataDir, "data", "med.fits")


def getSpatialParameters(nKernelParams, func):
    """Get basic spatial parameters list

    You may wish to tweak it up for specific cases (especially the lower order terms)
    """
コード例 #12
0
def run(visit, rerun, config):
    """Read one visit, calibrate and photometer it, and write the outputs.

    Parameters
    ----------
    visit : visit identifier used to build the data id.
    rerun : rerun identifier used to build the data id.
    config : pipeline configuration passed to ReadWrite/Calibrate/Photometry.

    Returns
    -------
    bb : list of (minX, minY, maxX, maxY) bounding-box tuples, one per
        detected footprint; also persisted via the output butler as 'bb'.
    """
    mapper = getMapper()
    dataId = {'visit': visit, 'rerun': rerun}
    rrdir = mapper.getPath('outdir', dataId)
    if not os.path.exists(rrdir):
        # fix: message previously read 'ouputs'
        print('Creating directory for outputs:', rrdir)
        os.makedirs(rrdir)
    else:
        print('Output directory:', rrdir)
    io = pipReadWrite.ReadWrite(mapper, ['visit'], config=config)
    print('Reading exposure')
    exposure = io.inButler.get('visitim', dataId)
    print('exposure is', exposure)
    print('size', exposure.getWidth(), 'x', exposure.getHeight())
    wcs = exposure.getWcs()
    assert wcs
    print('Calibrate()...')
    log = Log.getDefaultLogger()
    cal = MyCalibrate(config=config, log=log, Photometry=MyPhotometry)
    psf, sources, footprints = cal.run2(exposure)

    print('Photometry()...')
    phot = pipPhot.Photometry(config=config, log=log)
    # NOTE(review): this re-runs detection/measurement, replacing the
    # sources/footprints from calibration above -- confirm that is intended.
    sources, footprints = phot.run(exposure, psf)
    print('sources:', len(sources))
    for s in sources:
        print('  ', s, s.getXAstrom(), s.getYAstrom(), s.getPsfFlux(), s.getIxx(), s.getIyy(), s.getIxy())

    print('footprints:', footprints)
    # Collect footprint bounding boxes as plain tuples so they pickle cleanly.
    fps = footprints.getFootprints()
    print(len(fps))
    bb = []
    for f in fps:
        print('  Footprint', f)
        print('  ', f.getBBox())
        bbox = f.getBBox()
        bb.append((bbox.getMinX(), bbox.getMinY(), bbox.getMaxX(), bbox.getMaxY()))
        print('   # peaks:', len(f.getPeaks()))
        for p in f.getPeaks():
            print('    Peak', p)
    print('writing output...')
    io.write(dataId, psf=psf, sources=sources)
    print('done!')
    print('Writing bounding-boxes...')
    io.outButler.put(bb, 'bb', dataId)

    # Footprint/peak serialization intentionally not persisted
    # (footprintsToPython does not exist).

    return bb
コード例 #13
0
ファイル: utils.py プロジェクト: HyperSuprime-Cam/ip_diffim
def plotPixelResiduals(exposure, warpedTemplateExposure, diffExposure, kernelCellSet,
                       kernel, background, testSources, config,
                       origVariance=False, nptsFull=1e6, keepPlots=True, titleFs=14):
    """Plot diffim residuals for LOCAL and SPATIAL models.

    Draws four histograms of pixel residuals normalized by sqrt(variance):
    per-candidate basis fits, the spatial fit evaluated at each candidate,
    a control sample of footprints not used in the fit, and a subsampled
    set of all unmasked diffim pixels.

    Parameters
    ----------
    exposure : science exposure (variance source when ``origVariance``).
    warpedTemplateExposure : template warped to the science frame.
    diffExposure : difference exposure to inspect.
    kernelCellSet : spatial cell set holding the kernel candidates.
    kernel : spatially varying PSF-matching kernel.
    background : callable (x, y) -> spatial background value.
    testSources : control sources excluded from the fit.
    config : config forwarded to ``diffimTools.sourceToFootprintList``.
    origVariance : if True normalize by the input-exposure variance
        instead of the diffim variance.
    nptsFull : approximate number of full-image pixels to histogram.
    keepPlots : if True keep plot windows open at interpreter exit.
    titleFs : title font size in points.
    """
    candidateResids = []
    spatialResids = []
    nonfitResids = []

    for cell in kernelCellSet.getCellList():
        for cand in cell.begin(True):  # only look at good ones
            # Be sure
            if not (cand.getStatus() == afwMath.SpatialCellCandidate.GOOD):
                continue

            diffim = cand.getDifferenceImage(diffimLib.KernelCandidateF.ORIG)
            orig = cand.getScienceMaskedImage()

            # Evaluate the spatial kernel/background at this candidate's center
            # and form the corresponding difference image.
            ski = afwImage.ImageD(kernel.getDimensions())
            kernel.computeImage(ski, False, int(cand.getXCenter()), int(cand.getYCenter()))
            sk = afwMath.FixedKernel(ski)
            sbg = background(int(cand.getXCenter()), int(cand.getYCenter()))
            sdiffim = cand.getDifferenceImage(sk, sbg)

            # trim edges due to convolution
            bbox = kernel.shrinkBBox(diffim.getBBox())
            tdiffim = diffim.Factory(diffim, bbox)
            torig = orig.Factory(orig, bbox)
            tsdiffim = sdiffim.Factory(sdiffim, bbox)

            if origVariance:
                candidateResids.append(np.ravel(tdiffim.getImage().getArray() /
                                                np.sqrt(torig.getVariance().getArray())))
                spatialResids.append(np.ravel(tsdiffim.getImage().getArray() /
                                              np.sqrt(torig.getVariance().getArray())))
            else:
                candidateResids.append(np.ravel(tdiffim.getImage().getArray() /
                                                np.sqrt(tdiffim.getVariance().getArray())))
                spatialResids.append(np.ravel(tsdiffim.getImage().getArray() /
                                              np.sqrt(tsdiffim.getVariance().getArray())))

    fullIm = diffExposure.getMaskedImage().getImage().getArray()
    fullMask = diffExposure.getMaskedImage().getMask().getArray()
    if origVariance:
        fullVar = exposure.getMaskedImage().getVariance().getArray()
    else:
        fullVar = diffExposure.getMaskedImage().getVariance().getArray()

    # Exclude NO_DATA/SAT pixels, then subsample the survivors down to
    # roughly nptsFull points.
    bitmaskBad = 0
    bitmaskBad |= afwImage.Mask.getPlaneBitMask('NO_DATA')
    bitmaskBad |= afwImage.Mask.getPlaneBitMask('SAT')
    idx = np.where((fullMask & bitmaskBad) == 0)
    # Guard against a zero slice step when fewer than nptsFull pixels survive
    # the mask (int(k // nptsFull) == 0 would make [::stride] raise ValueError).
    stride = max(1, int(len(idx[0])//nptsFull))
    sidx = idx[0][::stride], idx[1][::stride]
    allResids = fullIm[sidx]/np.sqrt(fullVar[sidx])

    testFootprints = diffimTools.sourceToFootprintList(testSources, warpedTemplateExposure,
                                                       exposure, config, Log.getDefaultLogger())
    for fp in testFootprints:
        subexp = diffExposure.Factory(diffExposure, fp["footprint"].getBBox())
        subim = subexp.getMaskedImage().getImage()
        if origVariance:
            subvar = afwImage.ExposureF(exposure, fp["footprint"].getBBox()).getMaskedImage().getVariance()
        else:
            subvar = subexp.getMaskedImage().getVariance()
        nonfitResids.append(np.ravel(subim.getArray()/np.sqrt(subvar.getArray())))

    candidateResids = np.ravel(np.array(candidateResids))
    spatialResids = np.ravel(np.array(spatialResids))
    nonfitResids = np.ravel(np.array(nonfitResids))

    try:
        import pylab
        from matplotlib.font_manager import FontProperties
    except ImportError as e:
        print("Unable to import pylab: %s" % e)
        return

    fig = pylab.figure()
    fig.clf()
    try:
        fig.canvas._tkcanvas._root().lift()  # == Tk's raise, but raise is a python reserved word
    except Exception:                                 # protect against API changes
        pass
    if origVariance:
        fig.suptitle("Diffim residuals: Normalized by sqrt(input variance)", fontsize=titleFs)
    else:
        fig.suptitle("Diffim residuals: Normalized by sqrt(diffim variance)", fontsize=titleFs)

    sp1 = pylab.subplot(221)
    sp2 = pylab.subplot(222, sharex=sp1, sharey=sp1)
    sp3 = pylab.subplot(223, sharex=sp1, sharey=sp1)
    sp4 = pylab.subplot(224, sharex=sp1, sharey=sp1)
    xs = np.arange(-5, 5.05, 0.1)
    ys = 1./np.sqrt(2*np.pi)*np.exp(-0.5*xs**2)

    # NOTE(review): `normed=` was removed from Axes.hist in matplotlib 3.1;
    # newer matplotlib requires `density=True` -- confirm the pinned version.
    sp1.hist(candidateResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(candidateResids), np.var(candidateResids)))
    sp1.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp1.set_title("Candidates: basis fit", fontsize=titleFs - 2)
    sp1.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    sp2.hist(spatialResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(spatialResids), np.var(spatialResids)))
    sp2.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp2.set_title("Candidates: spatial fit", fontsize=titleFs - 2)
    sp2.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    sp3.hist(nonfitResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(nonfitResids), np.var(nonfitResids)))
    sp3.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp3.set_title("Control sample: spatial fit", fontsize=titleFs - 2)
    sp3.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    sp4.hist(allResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(allResids), np.var(allResids)))
    sp4.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp4.set_title("Full image (subsampled)", fontsize=titleFs - 2)
    sp4.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    pylab.setp(sp1.get_xticklabels() + sp1.get_yticklabels(), fontsize=titleFs - 4)
    pylab.setp(sp2.get_xticklabels() + sp2.get_yticklabels(), fontsize=titleFs - 4)
    pylab.setp(sp3.get_xticklabels() + sp3.get_yticklabels(), fontsize=titleFs - 4)
    pylab.setp(sp4.get_xticklabels() + sp4.get_yticklabels(), fontsize=titleFs - 4)

    sp1.set_xlim(-5, 5)
    sp1.set_ylim(0, 0.5)
    fig.show()

    global keptPlots
    if keepPlots and not keptPlots:
        # Keep plots open when done
        def show():
            print("%s: Please close plots when done." % __name__)
            try:
                pylab.show()
            except Exception:
                pass
            print("Plots closed, exiting...")
        import atexit
        atexit.register(show)
        keptPlots = True
コード例 #14
0
    def __call__(self, args):
        """Execute the Task on one target.

        Parameters
        ----------
        args
            Tuple of (data reference, dict of keyword arguments) for
            Task.runDataRef().

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Always carries ``exitStatus`` (0 on success, 1 on failure).
            When ``doReturnResults`` is `True` it additionally carries
            ``dataRef``, ``metadata`` (task metadata after the run) and
            ``result`` (the task's return value, or `None` on failure).

        Notes
        -----
        The data-id label is pushed into the logging MDC for the duration
        of the call and removed before returning.

        .. warning::

           If you override this method and wish to return something when
           ``doReturnResults`` is `False`, then it must be picklable to
           support multiprocessing and it should be small enough that
           pickling and unpickling do not add excessive overhead.
        """
        dataRef, kwargs = args
        if self.log is None:
            self.log = Log.getDefaultLogger()
        if hasattr(dataRef, "dataId"):
            self.log.MDC("LABEL", str(dataRef.dataId))
        elif isinstance(dataRef, (list, tuple)):
            self.log.MDC("LABEL", str([ref.dataId for ref in dataRef if hasattr(ref, "dataId")]))
        task = self.makeTask(args=args)
        result = None       # stays None unless the task completes
        exitStatus = 0      # per-dataRef shell exit contribution
        try:
            result = self.runTask(task, dataRef, kwargs)
        except Exception as e:
            if self.doRaise:
                # Caller asked for exceptions to propagate unchanged.
                raise
            # The shell exit value will be the number of dataRefs returning
            # non-zero, so the actual value used here is lost.
            exitStatus = 1

            # don't use a try block as we need to preserve the original exception
            eName = type(e).__name__
            if hasattr(dataRef, "dataId"):
                task.log.fatal("Failed on dataId=%s: %s: %s", dataRef.dataId, eName, e)
            elif isinstance(dataRef, (list, tuple)):
                task.log.fatal("Failed on dataIds=[%s]: %s: %s",
                               ", ".join(str(ref.dataId) for ref in dataRef), eName, e)
            else:
                task.log.fatal("Failed on dataRef=%s: %s: %s", dataRef, eName, e)

            if not isinstance(e, TaskError):
                traceback.print_exc(file=sys.stderr)

        # Ensure all errors have been logged and aren't hanging around in a buffer
        sys.stdout.flush()
        sys.stderr.flush()

        task.writeMetadata(dataRef)

        # remove MDC so it does not show up outside of task context
        self.log.MDCRemove("LABEL")

        if not self.doReturnResults:
            return Struct(exitStatus=exitStatus)
        return Struct(
            exitStatus=exitStatus,
            dataRef=dataRef,
            metadata=task.metadata,
            result=result,
        )
コード例 #15
0
ファイル: test_IndexExposure.py プロジェクト: lsst/daf_ingest
 def test_basic(self):
     """Perform basic correctness testing."""
     # Build FITS-style WCS property sets for two exposures on the
     # equator; only CRVAL (the tangent point) differs between them.
     ps = []
     for center in ((0.0, 0.0), (180.0, 0.0)):
         props = daf_base.PropertySet()
         for name, value in (("NAXIS1", 9), ("NAXIS2", 9),
                             ("RADECSYS", "ICRS"), ("EQUINOX", 2000.0),
                             ("CTYPE1", "RA---TAN"), ("CTYPE2", "DEC--TAN"),
                             ("CRPIX1", 5.0), ("CRPIX2", 5.0),
                             ("CRVAL1", center[0]), ("CRVAL2", center[1]),
                             ("CD1_1", 1.0), ("CD2_1", 0.0),
                             ("CD1_2", 0.0), ("CD2_2", 1.0)):
             props.add(name, value)
         ps.append(props)
     # Keep one as a raw PropertySet and turn the other into an Exposure,
     # so both supported input flavors are exercised.
     inputs = [ps[0], afw_image.ExposureF(8, 8, afw_image.makeWcs(ps[1]))]
     # Test data-ids are just integers.
     refs = [MockDataRef(i, v) for i, v in enumerate(inputs)]
     config = IndexExposureConfig()
     config.allow_replace = True
     config.defer_writes = True
     config.init_statements = ['PRAGMA page_size = 4096']
     database = sqlite3.connect(":memory:")
     # Hand-build the parsed-command Struct to bypass the CLI parser.
     parsed_cmd = pipe_base.Struct(
         config=config,
         log=Log.getDefaultLogger(),
         doraise=True,
         clobberConfig=False,
         noBackupConfig=False,
         database=database,
         dstype="bogus",
         id=pipe_base.Struct(refList=refs),
     )
     runner = IndexExposureRunner(IndexExposureTask, parsed_cmd)
     runner.run(parsed_cmd)
     # allow_replace=True must permit re-ingestion; toggle off deferred
     # writes so the immediate-write path is exercised too.
     runner.config.defer_writes = False
     runner.run(parsed_cmd)
     # With allow_replace=False a re-ingest must raise.
     runner.config.allow_replace = False
     with self.assertRaises(Exception):
         runner.run(parsed_cmd)
     # Verify database contents: data ids must round-trip through
     # their pickled form.
     data_ids = sorted(pickle.loads(str(r[0])) for r in database.execute(
         "SELECT pickled_data_id FROM exposure"))
     self.assertEqual(data_ids, [0, 1])
     # A small spatial query near the first exposure should return it,
     # and only it.
     center = sphgeom.UnitVector3d(sphgeom.LonLat.fromDegrees(4.0, 1.0))
     circle = sphgeom.Circle(center, sphgeom.Angle.fromDegrees(1.5))
     results = find_intersecting_exposures(database, circle)
     self.assertEqual(len(results), 1)
     info = results[0]
     self.assertEqual(info.data_id, 0)
     self.assertEqual(circle.relate(info.boundary), sphgeom.INTERSECTS)
     database.close()
コード例 #16
0
# the GNU General Public License along with this program.  If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#

import math
import sys
import os
import time

import lsst.utils
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.afw.geom as afwGeom
from lsst.log import Log

# Default to INFO-level logging, but enable DEBUG tracing for the
# convolution routines specifically.
Log.getDefaultLogger().setLevel(Log.INFO)
Log.getLogger("TRACE2.afw.math.convolve").setLevel(Log.DEBUG)

MaxIter = 20  # presumably caps the timing-loop iteration count -- confirm in driver below
MaxTime = 1.0  # seconds

# Location of test data; requires the afwdata package to be set up.
afwdataDir = lsst.utils.getPackageDir("afwdata")

InputMaskedImagePath = os.path.join(afwdataDir, "data", "med.fits")


def getSpatialParameters(nKernelParams, func):
    """Get basic spatial parameters list

    You may wish to tweak it up for specific cases (especially the lower order terms)
    """
コード例 #17
0
def plotPixelResiduals(exposure, warpedTemplateExposure, diffExposure, kernelCellSet,
                       kernel, background, testSources, config,
                       origVariance=False, nptsFull=1e6, keepPlots=True, titleFs=14):
    """Plot diffim residuals for LOCAL and SPATIAL models.

    Draws four histograms of pixel residuals normalized by sqrt(variance):
    per-candidate basis fits, the spatial fit evaluated at each candidate,
    a control sample of footprints not used in the fit, and a subsampled
    set of all unmasked diffim pixels.

    Parameters
    ----------
    exposure : science exposure (variance source when ``origVariance``).
    warpedTemplateExposure : template warped to the science frame.
    diffExposure : difference exposure to inspect.
    kernelCellSet : spatial cell set holding the kernel candidates.
    kernel : spatially varying PSF-matching kernel.
    background : callable (x, y) -> spatial background value.
    testSources : control sources excluded from the fit.
    config : config forwarded to ``diffimTools.sourceToFootprintList``.
    origVariance : if True normalize by the input-exposure variance
        instead of the diffim variance.
    nptsFull : approximate number of full-image pixels to histogram.
    keepPlots : if True keep plot windows open at interpreter exit.
    titleFs : title font size in points.
    """
    candidateResids = []
    spatialResids = []
    nonfitResids = []

    for cell in kernelCellSet.getCellList():
        for cand in cell.begin(True):  # only look at good ones
            # Be sure
            if not (cand.getStatus() == afwMath.SpatialCellCandidate.GOOD):
                continue

            diffim = cand.getDifferenceImage(diffimLib.KernelCandidateF.ORIG)
            orig = cand.getScienceMaskedImage()

            # Evaluate the spatial kernel/background at this candidate's center
            # and form the corresponding difference image.
            ski = afwImage.ImageD(kernel.getDimensions())
            kernel.computeImage(ski, False, int(cand.getXCenter()), int(cand.getYCenter()))
            sk = afwMath.FixedKernel(ski)
            sbg = background(int(cand.getXCenter()), int(cand.getYCenter()))
            sdiffim = cand.getDifferenceImage(sk, sbg)

            # trim edges due to convolution
            bbox = kernel.shrinkBBox(diffim.getBBox())
            tdiffim = diffim.Factory(diffim, bbox)
            torig = orig.Factory(orig, bbox)
            tsdiffim = sdiffim.Factory(sdiffim, bbox)

            if origVariance:
                candidateResids.append(np.ravel(tdiffim.getImage().getArray() /
                                                np.sqrt(torig.getVariance().getArray())))
                spatialResids.append(np.ravel(tsdiffim.getImage().getArray() /
                                              np.sqrt(torig.getVariance().getArray())))
            else:
                candidateResids.append(np.ravel(tdiffim.getImage().getArray() /
                                                np.sqrt(tdiffim.getVariance().getArray())))
                spatialResids.append(np.ravel(tsdiffim.getImage().getArray() /
                                              np.sqrt(tsdiffim.getVariance().getArray())))

    fullIm = diffExposure.getMaskedImage().getImage().getArray()
    fullMask = diffExposure.getMaskedImage().getMask().getArray()
    if origVariance:
        fullVar = exposure.getMaskedImage().getVariance().getArray()
    else:
        fullVar = diffExposure.getMaskedImage().getVariance().getArray()

    # Exclude NO_DATA/SAT pixels, then subsample the survivors down to
    # roughly nptsFull points.
    bitmaskBad = 0
    bitmaskBad |= afwImage.Mask.getPlaneBitMask('NO_DATA')
    bitmaskBad |= afwImage.Mask.getPlaneBitMask('SAT')
    idx = np.where((fullMask & bitmaskBad) == 0)
    # Guard against a zero slice step when fewer than nptsFull pixels survive
    # the mask (int(k // nptsFull) == 0 would make [::stride] raise ValueError).
    stride = max(1, int(len(idx[0])//nptsFull))
    sidx = idx[0][::stride], idx[1][::stride]
    allResids = fullIm[sidx]/np.sqrt(fullVar[sidx])

    testFootprints = diffimTools.sourceToFootprintList(testSources, warpedTemplateExposure,
                                                       exposure, config, Log.getDefaultLogger())
    for fp in testFootprints:
        subexp = diffExposure.Factory(diffExposure, fp["footprint"].getBBox())
        subim = subexp.getMaskedImage().getImage()
        if origVariance:
            subvar = afwImage.ExposureF(exposure, fp["footprint"].getBBox()).getMaskedImage().getVariance()
        else:
            subvar = subexp.getMaskedImage().getVariance()
        nonfitResids.append(np.ravel(subim.getArray()/np.sqrt(subvar.getArray())))

    candidateResids = np.ravel(np.array(candidateResids))
    spatialResids = np.ravel(np.array(spatialResids))
    nonfitResids = np.ravel(np.array(nonfitResids))

    try:
        import pylab
        from matplotlib.font_manager import FontProperties
    except ImportError as e:
        print("Unable to import pylab: %s" % e)
        return

    fig = pylab.figure()
    fig.clf()
    try:
        fig.canvas._tkcanvas._root().lift()  # == Tk's raise, but raise is a python reserved word
    except Exception:                                 # protect against API changes
        pass
    if origVariance:
        fig.suptitle("Diffim residuals: Normalized by sqrt(input variance)", fontsize=titleFs)
    else:
        fig.suptitle("Diffim residuals: Normalized by sqrt(diffim variance)", fontsize=titleFs)

    sp1 = pylab.subplot(221)
    sp2 = pylab.subplot(222, sharex=sp1, sharey=sp1)
    sp3 = pylab.subplot(223, sharex=sp1, sharey=sp1)
    sp4 = pylab.subplot(224, sharex=sp1, sharey=sp1)
    xs = np.arange(-5, 5.05, 0.1)
    ys = 1./np.sqrt(2*np.pi)*np.exp(-0.5*xs**2)

    # NOTE(review): `normed=` was removed from Axes.hist in matplotlib 3.1;
    # newer matplotlib requires `density=True` -- confirm the pinned version.
    sp1.hist(candidateResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(candidateResids), np.var(candidateResids)))
    sp1.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp1.set_title("Candidates: basis fit", fontsize=titleFs - 2)
    sp1.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    sp2.hist(spatialResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(spatialResids), np.var(spatialResids)))
    sp2.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp2.set_title("Candidates: spatial fit", fontsize=titleFs - 2)
    sp2.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    sp3.hist(nonfitResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(nonfitResids), np.var(nonfitResids)))
    sp3.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp3.set_title("Control sample: spatial fit", fontsize=titleFs - 2)
    sp3.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    sp4.hist(allResids, bins=xs, normed=True, alpha=0.5, label="N(%.2f, %.2f)"
             % (np.mean(allResids), np.var(allResids)))
    sp4.plot(xs, ys, "r-", lw=2, label="N(0,1)")
    sp4.set_title("Full image (subsampled)", fontsize=titleFs - 2)
    sp4.legend(loc=1, fancybox=True, shadow=True, prop=FontProperties(size=titleFs - 6))

    pylab.setp(sp1.get_xticklabels() + sp1.get_yticklabels(), fontsize=titleFs - 4)
    pylab.setp(sp2.get_xticklabels() + sp2.get_yticklabels(), fontsize=titleFs - 4)
    pylab.setp(sp3.get_xticklabels() + sp3.get_yticklabels(), fontsize=titleFs - 4)
    pylab.setp(sp4.get_xticklabels() + sp4.get_yticklabels(), fontsize=titleFs - 4)

    sp1.set_xlim(-5, 5)
    sp1.set_ylim(0, 0.5)
    fig.show()

    global keptPlots
    if keepPlots and not keptPlots:
        # Keep plots open when done
        def show():
            print("%s: Please close plots when done." % __name__)
            try:
                pylab.show()
            except Exception:
                pass
            print("Plots closed, exiting...")
        import atexit
        atexit.register(show)
        keptPlots = True
コード例 #18
0
def plotsForField(inButler, keys, fixup, plots=None, prefix=''):
    """Produce diagnostic plots for one sensor of a simulated LSST (ImSim) visit.

    Parameters
    ----------
    inButler :
        Butler used to fetch the 'raw' metadata, source catalogs, match list
        and calibrated exposure for ``keys``.
    keys : `dict`
        Data-id keyword arguments; must contain 'visit', 'raft' and 'sensor'.
    fixup : `bool`
        If true, pass a source-id mask/offset workaround to the match-list
        reconstruction (see the inline comment below).
    plots : `list` of `str`, optional
        Which plot families to produce; defaults to
        ``['photom', 'matches', 'corr', 'distortion']``.
    prefix : `str`, optional
        Prefix prepended to every output file name.
    """
    if plots is None:
        plots = ['photom', 'matches', 'corr', 'distortion']

    # Which filter was this exposure taken in?  Use the first one reported.
    filterList = inButler.queryMetadata('raw', 'filter', **keys)
    print('Filters:', filterList)
    filterName = filterList[0]

    # Prefer the calibration source catalog; fall back to the final one.
    try:
        persistedSources = inButler.get('icSrc', **keys)
        print('Got sources', persistedSources)
    except Exception:
        print('"icSrc" not found.  Trying "src" instead.')
        persistedSources = inButler.get('src', **keys)
        print('Got sources', persistedSources)

    persistedMatches = inButler.get('icMatch', **keys)
    sources = persistedSources.getSources()

    calexp = inButler.get('calexp', **keys)
    wcs = calexp.getWcs()

    # Photometric zeropoint: the magnitude corresponding to one count.
    zp = calexp.getCalib().getMagnitude(1.)
    print('Zeropoint is', zp)

    # ref sources
    W, H = calexp.getWidth(), calexp.getHeight()

    log = Log.getDefaultLogger()
    log.setLevel(Log.DEBUG)

    kwargs = {}
    if fixup:
        # ugh, mask and offset req'd because source ids are assigned at write-time
        # and match list code made a deep copy before that.
        # (see svn+ssh://svn.lsstcorp.org/DMS/meas/astrom/tickets/1491-b r18027)
        kwargs['sourceIdMask'] = 0xffff
        kwargs['sourceIdOffset'] = -1

    matches, ref = measAstrom.generateMatchesFromMatchList(
        persistedMatches, sources, wcs, W, H, returnRefs=True, log=log, **kwargs)
    print('Got', len(ref), 'reference catalog sources')

    # Pull per-reference-source star/galaxy flags and magnitude errors
    # out of the reference catalog.
    fdict = maUtils.getDetectionFlags()
    starflag = int(fdict["STAR"])
    stargal = [bool((r.getFlagForDetection() & starflag) > 0)
               for r in ref]
    referrs = [float(r.getPsfInstFluxErr() / r.getPsfInstFlux() * 2.5 / -np.log(10))
               for r in ref]
    nstars = sum(1 for isStar in stargal if isStar)
    print('Number of sources with STAR set:', nstars)

    visit, raft, sensor = keys['visit'], keys['raft'], keys['sensor']
    raftTag = raft.replace(',', '')
    sensorTag = sensor.replace(',', '')
    prefix += 'imsim-v%i-r%s-s%s' % (visit, raftTag, sensorTag)

    if 'photom' in plots:
        print('photometry plots...')
        tt = 'LSST ImSim v%i r%s s%s' % (visit, raftTag, sensorTag)

        wcsPlots.plotPhotometry(sources, ref, matches, prefix, band=filterName,
                                zp=zp, referrs=referrs, refstargal=stargal, title=tt)
        wcsPlots.plotPhotometry(sources, ref, matches, prefix, band=filterName, zp=zp,
                                delta=True, referrs=referrs, refstargal=stargal, title=tt)

        # test w/ and w/o referrs and stargal.
        if False:
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'A', band=filterName, zp=zp, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'B',
                                    band=filterName, zp=zp, referrs=referrs, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'C',
                                    band=filterName, zp=zp, refstargal=stargal, title=tt)

            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'A',
                                    band=filterName, zp=zp, delta=True, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'B', band=filterName,
                                    zp=zp, delta=True, referrs=referrs, title=tt)
            wcsPlots.plotPhotometry(sources, ref, matches, prefix + 'C', band=filterName,
                                    zp=zp, delta=True, refstargal=stargal, title=tt)

    if 'matches' in plots:
        print('matches...')
        wcsPlots.plotMatches(sources, ref, matches, wcs, W, H, prefix)

    if 'distortion' in plots:
        print('distortion...')
        wcsPlots.plotDistortion(wcs, W, H, 400, prefix,
                                'SIP Distortion (exaggerated x 10)', exaggerate=10.)
        print('distortion...')
        wcsPlots.plotDistortion(wcs, W, H, 400, prefix,
                                'SIP Distortion (exaggerated x 100)', exaggerate=100.,
                                suffix='-distort2.')
コード例 #19
0
    def __call__(self, args):
        """!Run the Task on a single target.

        The default implementation expects @p args to be a tuple containing
        a data reference and a dict of keyword arguments for Task.run().

        @warning anything returned when doReturnResults is false must be
        picklable (to support multiprocessing) and small enough that pickling
        and unpickling do not add excessive overhead.

        @param args     Arguments for Task.run()

        @return:
        - None if doReturnResults false
        - A pipe_base Struct containing these fields if doReturnResults true:
            - dataRef: the provided data reference
            - metadata: task metadata after execution of run
            - result: result returned by task run, or None if the task fails
        """
        dataRef, kwargs = args
        # Fall back to the default logger if no logger was supplied.
        if self.log is None:
            self.log = Log.getDefaultLogger()
        # Label all log output from this target with its data id(s) via the
        # logging MDC.
        if hasattr(dataRef, "dataId"):
            self.log.MDC("LABEL", str(dataRef.dataId))
        elif isinstance(dataRef, (list, tuple)):
            labels = [ref.dataId for ref in dataRef if hasattr(ref, "dataId")]
            self.log.MDC("LABEL", str(labels))

        task = self.makeTask(args=args)
        exitStatus = 0  # exit status for the shell; set to 1 on failure
        result = None  # stays None if the task fails
        if self.doRaise:
            # Let any exception propagate to the caller.
            result = task.run(dataRef, **kwargs)
        else:
            try:
                result = task.run(dataRef, **kwargs)
            except Exception as e:
                # n.b. The shell exit value is the number of dataRefs returning
                # non-zero, so the actual value used here is lost
                exitStatus = 1

                # don't use a try block as we need to preserve the original exception
                if hasattr(dataRef, "dataId"):
                    task.log.fatal("Failed on dataId=%s: %s", dataRef.dataId, e)
                elif isinstance(dataRef, (list, tuple)):
                    allIds = ", ".join(str(ref.dataId) for ref in dataRef)
                    task.log.fatal("Failed on dataId=[%s]: %s", allIds, e)
                else:
                    task.log.fatal("Failed on dataRef=%s: %s", dataRef, e)

                # TaskError marks an expected failure; anything else also gets
                # a traceback for debugging.
                if not isinstance(e, TaskError):
                    traceback.print_exc(file=sys.stderr)
        task.writeMetadata(dataRef)

        # remove MDC so it does not show up outside of task context
        self.log.MDCRemove("LABEL")

        if not self.doReturnResults:
            return Struct(exitStatus=exitStatus)
        return Struct(
            exitStatus=exitStatus,
            dataRef=dataRef,
            metadata=task.metadata,
            result=result,
        )
コード例 #20
0
def printProcessStats():
    """Log this process's resource statistics at INFO level.

    Emits a single line combining the module-level ``NODE`` identifier with
    the output of ``processStats()``.
    """
    from lsst.log import Log
    log = Log.getDefaultLogger()
    # Pass the arguments lazily (printf-style) instead of pre-formatting with
    # '%': formatting is then skipped when INFO is disabled, matching the
    # logging style used elsewhere in this file (e.g. task.log.fatal calls).
    log.info("Process stats for %s: %s", NODE, processStats())
コード例 #21
0
ファイル: cmdLineTask.py プロジェクト: triangulum/pipe_base
    def __call__(self, args):
        """Run the Task on a single target.

        Parameters
        ----------
        args
            Arguments for Task.runDataRef()

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Contains these fields if ``doReturnResults`` is `True`:

            - ``dataRef``: the provided data reference.
            - ``metadata``: task metadata after execution of run.
            - ``result``: result returned by task run, or `None` if the task fails.
            - ``exitStatus``: 0 if the task completed successfully, 1 otherwise.

            If ``doReturnResults`` is `False` the struct contains:

            - ``exitStatus``: 0 if the task completed successfully, 1 otherwise.

        Notes
        -----
        This default implementation assumes that the ``args`` is a tuple containing a data reference and a
        dict of keyword arguments.

        .. warning::

           If you override this method and wish to return something when ``doReturnResults`` is `False`,
           then it must be picklable to support multiprocessing and it should be small enough that pickling
           and unpickling do not add excessive overhead.
        """
        dataRef, kwargs = args
        # Fall back to the default logger if no logger was supplied.
        if self.log is None:
            self.log = Log.getDefaultLogger()
        # Label all log output from this target with its data id(s) via the
        # logging MDC; removed again at the end of this method.
        if hasattr(dataRef, "dataId"):
            self.log.MDC("LABEL", str(dataRef.dataId))
        elif isinstance(dataRef, (list, tuple)):
            self.log.MDC(
                "LABEL",
                str([ref.dataId for ref in dataRef if hasattr(ref, "dataId")]))
        task = self.makeTask(args=args)
        result = None  # in case the task fails
        exitStatus = 0  # exit status for the shell
        # With doRaise, exceptions propagate to the caller; otherwise they are
        # logged and converted into a non-zero exit status.
        if self.doRaise:
            result = self.runTask(task, dataRef, kwargs)
        else:
            try:
                result = self.runTask(task, dataRef, kwargs)
            except Exception as e:
                # The shell exit value will be the number of dataRefs returning
                # non-zero, so the actual value used here is lost.
                exitStatus = 1

                # don't use a try block as we need to preserve the original exception
                eName = type(e).__name__
                if hasattr(dataRef, "dataId"):
                    task.log.fatal("Failed on dataId=%s: %s: %s",
                                   dataRef.dataId, eName, e)
                elif isinstance(dataRef, (list, tuple)):
                    task.log.fatal(
                        "Failed on dataIds=[%s]: %s: %s",
                        ", ".join(str(ref.dataId) for ref in dataRef), eName,
                        e)
                else:
                    task.log.fatal("Failed on dataRef=%s: %s: %s", dataRef,
                                   eName, e)

                # TaskError marks an expected failure; anything else also gets
                # a traceback for debugging.
                if not isinstance(e, TaskError):
                    traceback.print_exc(file=sys.stderr)

        # Ensure all errors have been logged and aren't hanging around in a buffer
        sys.stdout.flush()
        sys.stderr.flush()

        task.writeMetadata(dataRef)

        # remove MDC so it does not show up outside of task context
        self.log.MDCRemove("LABEL")

        if self.doReturnResults:
            return Struct(
                exitStatus=exitStatus,
                dataRef=dataRef,
                metadata=task.metadata,
                result=result,
            )
        else:
            return Struct(exitStatus=exitStatus, )
コード例 #22
0
ファイル: parallel.py プロジェクト: lsst/ctrl_pool
def printProcessStats():
    """Log this process's resource statistics at INFO level.

    Emits a single line combining the module-level ``NODE`` identifier with
    the output of ``processStats()``.
    """
    from lsst.log import Log
    log = Log.getDefaultLogger()
    # Pass the arguments lazily (printf-style) instead of pre-formatting with
    # '%': formatting is then skipped when INFO is disabled, matching the
    # logging style used elsewhere in this file (e.g. task.log.fatal calls).
    log.info("Process stats for %s: %s", NODE, processStats())