import lsst.afw.geom as afwGeom
import lsst.afw.display.ds9 as ds9
import lsst.daf.persistence as dafPersist
from astropy.time import Time

import re
from astropy.table import Table
import astropy.coordinates as coord
import astropy.units as u


DATADIR="/sps/lsst/data/fouchez/DC2/testCB2/tst3/output"

print(DATADIR)

butler = dafPersist.Butler(DATADIR)

visits = butler.queryMetadata('deepDiff_diaSrc', ['visit'], dataId={'filter': 'r'})

print(visits)

print(butler.datasetExists("deepDiff_diaSrc", {"raftName":"R22","detectorName":"S11","filter":'r', "visit":417009}))

rafts = butler.queryMetadata('deepDiff_diaSrc', ['raftName'], dataId={'filter': 'r'})

detectors = butler.queryMetadata('deepDiff_diaSrc', ['detectorName'], dataId={'filter': 'r'})

rafts = ['R22']
detectors = ['S02']

multi_matches = None
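
# A hedged follow-up sketch: loop over the visit/raft/detector combinations
# selected above and fetch each difference-image source catalog, skipping
# dataIds that are missing from the repo.
for visit in visits:
    for raft in rafts:
        for detector in detectors:
            dataId = {"raftName": raft, "detectorName": detector,
                      "filter": 'r', "visit": visit}
            if not butler.datasetExists("deepDiff_diaSrc", dataId):
                continue
            diaSrc = butler.get("deepDiff_diaSrc", dataId=dataId)
            print(visit, raft, detector, len(diaSrc))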
Example #2
import warnings

warnings.filterwarnings("ignore")
import sqlite3
from sqlite3 import Error
import numpy as np
import pandas as pd
import lsst.daf.persistence as dp
import astropy.wcs

data_imsim = '/global/cscratch1/sd/desc/DC2/data/Run1.2i_globus_in2p3_20181217/w_2018_39/rerun/281118'  # Path to the processed images
butler = dp.Butler(data_imsim)
datarefs = butler.subset(
    'src'
).cache  # This contains information about the visits that have been processed
corners = np.array([[-0.5, -0.5], [-0.5, 3999.5], [4071.5, -0.5],
                    [4071.5, 3999.5]])
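
# A hedged sketch (the dataId below is hypothetical): turn the CCD corner
# pixels above into (RA, Dec) with astropy.wcs; this is how the llc/lrc/ulc
# corner entries of the query dict below are typically filled.
md = butler.get('calexp_md', visit=219976, raftName='R22', detectorName='S11')
ccd_wcs = astropy.wcs.WCS(md.toDict())
corner_radec = ccd_wcs.wcs_pix2world(corners, 0)  # one (ra, dec) row per corner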

# query using butler
query = {
    'visit': [],
    'filter': [],
    'bg_mean': [],
    'bg_var': [],
    'zp': [],
    'zp_err': [],
    'llcra': [],
    'llcdec': [],
    'lrcra': [],
    'lrcdec': [],
    'ulcra': [],
    'ulcdec': [],
    'urcra': [],
    'urcdec': [],
}  # closing keys (ulcdec, urcra, urcdec) assumed from the corner naming pattern

Example #3
import os

import desc.pserv
import lsst.daf.persistence as dp


def get_patches(butler):
    """Return the 'x,y' patch names of every tract in the skymap."""
    skymap = butler.get('deepCoadd_skyMap')
    patches = {}
    for tract in skymap:
        patches[tract.getId()] = ['%i,%i' % x.getIndex() for x in tract]
    return patches


if __name__ == '__main__':
    db_info = dict(host='scidb1.nersc.gov', database='DESC_DC1_Level_2')
    conn = desc.pserv.DbConnection(**db_info)

    dry_run = True
    dry_run = False

    create_script = 'coadd_schema.sql'

    repo = '/global/cscratch1/sd/descdm/DC1/DC1-imsim-dithered'
    butler = dp.Butler(repo)
    patches = get_patches(butler)

    tract = 0
    band = 'r'

    n = 0
    nmax = 1
    for patch in patches[tract]:
        dataId = dict(patch=patch, tract=tract, filter=band)
        try:
            catalog = butler.get('deepCoadd_meas', dataId=dataId)
        except RuntimeError as eobj:
            continue
        if not os.path.isfile(create_script):
            create_sql_schema(catalog, create_script)
Example #4
    def setUp(self):
        self.outputDir = tempfile.mkdtemp(dir=ROOT,
                                          prefix='ButlerProxyTestCase-')
        self.butler = dafPersist.Butler(self.inputDir,
                                        outPath=os.path.join(
                                            self.outputDir, "proxyOut"))
Example #5
    def testExistingParents(self):
        # parents of inputs should be added to the inputs list
        butler = dp.Butler(
            outputs=dp.RepositoryArgs(mode='w',
                                      mapper=dpTest.EmptyTestMapper,
                                      root=os.path.join(self.testDir, 'a')))
        del butler
        butler = dp.Butler(inputs=os.path.join(self.testDir, 'a'),
                           outputs=os.path.join(self.testDir, 'b'))
        del butler
        butler = dp.Butler(inputs=os.path.join(self.testDir, 'b'))
        self.assertEqual(len(butler._repos.inputs()), 2)
        # verify search order:
        self.assertEqual(butler._repos.inputs()[0].cfg.root,
                         os.path.join(self.testDir, 'b'))
        self.assertEqual(butler._repos.inputs()[1].cfg.root,
                         os.path.join(self.testDir, 'a'))
        self.assertEqual(len(butler._repos.outputs()), 0)

        butler = dp.Butler(outputs=dp.RepositoryArgs(
            cfgRoot=os.path.join(self.testDir, 'b'), mode='rw'))
        # verify search order:
        self.assertEqual(butler._repos.inputs()[0].cfg.root,
                         os.path.join(self.testDir, 'b'))
        self.assertEqual(butler._repos.inputs()[1].cfg.root,
                         os.path.join(self.testDir, 'a'))
        self.assertEqual(len(butler._repos.outputs()), 1)
        self.assertEqual(butler._repos.outputs()[0].cfg.root,
                         os.path.join(self.testDir, 'b'))

        butler = dp.Butler(inputs=os.path.join(self.testDir, 'a'),
                           outputs=dp.RepositoryArgs(cfgRoot=os.path.join(
                               self.testDir, 'b'),
                                                     mode='rw'))
        self.assertEqual(len(butler._repos.inputs()), 2)
        # verify search order:
        self.assertEqual(butler._repos.inputs()[0].cfg.root,
                         os.path.join(self.testDir, 'b'))
        self.assertEqual(butler._repos.inputs()[1].cfg.root,
                         os.path.join(self.testDir, 'a'))
        self.assertEqual(len(butler._repos.outputs()), 1)
        self.assertEqual(butler._repos.outputs()[0].cfg.root,
                         os.path.join(self.testDir, 'b'))

        # parents of write-only outputs must be listed with the inputs
        with self.assertRaises(RuntimeError):
            butler = dp.Butler(outputs=os.path.join(self.testDir, 'b'))
        butler = dp.Butler(inputs=os.path.join(self.testDir, 'a'),
                           outputs=os.path.join(self.testDir, 'b'))
        self.assertEqual(len(butler._repos.inputs()), 1)
        self.assertEqual(len(butler._repos.outputs()), 1)
        self.assertEqual(butler._repos.outputs()[0].cfg.root,
                         os.path.join(self.testDir, 'b'))

        # add a new parent to an existing output
        butler = dp.Butler(
            outputs=dp.RepositoryArgs(mode='w',
                                      mapper=dpTest.EmptyTestMapper,
                                      root=os.path.join(self.testDir, 'c')))
        butler = dp.Butler(inputs=(os.path.join(self.testDir, 'a'),
                                   os.path.join(self.testDir, 'c')),
                           outputs=os.path.join(self.testDir, 'b'))

        # should raise if the input order gets reversed:
        with self.assertRaises(RuntimeError):
            butler = dp.Butler(inputs=(os.path.join(self.testDir, 'c'),
                                       os.path.join(self.testDir, 'a')),
                               outputs=os.path.join(self.testDir, 'b'))
Example #6
    def __init__(self, dataRoot):

        self.dataRoot = dataRoot

        import lsst.daf.persistence as dafPersist
        self.butler = dafPersist.Butler(dataRoot)
Example #7
def getGalaxy(rootdir, visit, ccd, tol):
    """Get list of sources which agree in position with fake ones with tol
    """
    # Call the butler
    butler = dafPersist.Butler(rootdir)
    dataId = {'visit': visit, 'ccd': ccd}
    tol = float(tol)

    # Get the source catalog and metadata
    sources = butler.get('src', dataId)
    cal_md = butler.get('calexp_md', dataId)

    # Get the X, Y locations of objects on the CCD
    srcX, srcY = sources.getX(), sources.getY()
    # Get the zeropoint
    zeropoint = (2.5 * np.log10(cal_md.get("FLUXMAG0")))
    # Get the parent ID
    parentID = sources.get('parent')
    # Check the star/galaxy separation
    extendClass = sources.get('classification.extendedness')
    # Get the nChild
    nChild = sources.get('deblend.nchild')

    # For Galaxies: Get these parameters
    # 1. Get the Kron flux and its error
    fluxKron, ferrKron = sources.get('flux.kron'), sources.get('flux.kron.err')
    magKron = (zeropoint - 2.5 * np.log10(fluxKron))
    merrKron = (2.5 / np.log(10) * (ferrKron / fluxKron))
    # X, Y locations of the fake galaxies
    fakeList = collections.defaultdict(tuple)
    # Regular Expression
    # Search for keywords like FAKE12
    fakename = re.compile('FAKE([0-9]+)')
    # Go through all the keywords
    counts = 0
    for card in cal_md.names():
        # To see if the card matches the pattern
        m = fakename.match(card)
        if m is not None:
            # Get the X,Y location for fake object
            x, y = map(float, (cal_md.get(card)).split(','))
            # Get the ID or index of the fake object
            fakeID = int(m.group(1))
            fakeList[counts] = [fakeID, x, y]
            counts += 1

    # Match the fake object to the source list
    srcIndex = collections.defaultdict(list)
    for fid, fcoord in fakeList.items():
        separation = np.sqrt(np.abs(srcX-fcoord[1])**2 +
                             np.abs(srcY-fcoord[2])**2)
        matched = (separation <= tol)
        matchId = np.where(matched)[0]
        matchSp = separation[matchId]
        # Sort the matched indices by separation, smallest first
        sortId = [mId for (_, mId) in sorted(zip(matchSp, matchId))]
        # DEBUG:
        # print fid, fcoord, matchId
        # print sortId, sorted(matchSp), matchId
        # Select the index of all matched object
        srcIndex[fid] = sortId

    # Return the source list
    mapper = SchemaMapper(sources.schema)
    mapper.addMinimalSchema(sources.schema)
    newSchema = mapper.getOutputSchema()
    newSchema.addField('fakeId', type=int,
                       doc='id of fake source matched to position')
    srcList = SourceCatalog(newSchema)
    srcList.reserve(sum([len(s) for s in srcIndex.values()]))

    # Return a list of interesting parameters
    srcParam = []
    nFake = 0
    for matchIndex in srcIndex.values():
        # Check if there is a match
        if len(matchIndex) > 0:
            # Select the one with the smallest separation
            # (matchIndex is already sorted by separation)
            ss = matchIndex[0]
            fakeObj = fakeList[nFake]
            diffX = srcX[ss] - fakeObj[1]
            diffY = srcY[ss] - fakeObj[2]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         magKron[ss], merrKron[ss], diffX, diffY,
                         parentID[ss], nChild[ss], extendClass[ss])
            srcParam.append(paramList)
        else:
            fakeObj = fakeList[nFake]
            paramList = (fakeObj[0], fakeObj[1], fakeObj[2],
                         0, 0, -1, -1, -1, -1, -1)
            srcParam.append(paramList)
        # Go to another fake object
        nFake += 1

    # Make a numpy record array
    srcParam = np.array(srcParam, dtype=[('fakeID', int),
                                         ('fakeX', float),
                                         ('fakeY', float),
                                         ('magKron', float),
                                         ('errKron', float),
                                         ('diffX', float),
                                         ('diffY', float),
                                         ('parentID', int),
                                         ('nChild', int),
                                         ('extendClass', float)])

    return srcIndex, srcParam, srcList, zeropoint
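
# A minimal usage sketch (the rerun path and dataId values are hypothetical).
srcIndex, srcParam, srcList, zeropoint = getGalaxy(
    '/path/to/rerun', visit=1228, ccd=40, tol=3.0)
nMatched = np.sum(srcParam['parentID'] >= 0)  # rows with a matched source
print('%d of %d fakes matched' % (nMatched, len(srcParam)))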
Example #8
def list_cc_images(
        date_filter,
        butler_path="/lsstdata/offline/teststand/NCSA_comcam/gen2repo",
        is_ccs=False,
        from_seqnum=None,
        save_file=False):
    """Create catalog of ComCam images.
    
    Functions to parse a Butler instance and write an image information
    catalog. The currently collected information is:
    
    Exposure Date, Sequence Number, Object Name, Image Type,
    Exposure Time, Filter
    
    This information can be written into a file by using the save_file
    parameter. The created file is named images_YYYYMMDD.csv.
    
    One can append to the file by using the from_seqnum parameter and
    passing it the last sequence number found previously.
    
    Parameters
    ----------
    date_filter : str
        Date to filter on in YYYY-MM-DD format.
    butler_path : str
        Path to the Butler repository containing the relevant files.
    is_ccs : bool, optional
        Use the CCS-style column set (which has no Object column).
    from_seqnum : int, optional
        Only list images after this sequence number.
    save_file : bool, optional
        Create a CSV file with the image catalog information.
    """

    butler = dafPersist.Butler(butler_path)
    dfilter = dict(dayObs=date_filter, detector=4)
    images = butler.queryMetadata('raw',
                                  ['dayObs', 'seqnum', 'expId', 'detector'],
                                  dfilter)
    print(f"Number of Images: {len(images)}")
    if is_ccs:
        titles = ["ExpDate", "SeqNum", "ImgType", "TestType",
                  "ExpTime"]  #, "Filter"]
    else:
        titles = [
            "ExpDate", "SeqNum", "Object", "ImgType", "TestType", "ExpTime"
        ]  #, "Filter"]
    print("\t".join(titles))
    ofile = None
    if save_file:
        if is_ccs:
            head = "ccs"
        else:
            head = "arc"
        ofilename = f"{head}_images_{date_filter.replace('-', '')}.csv"
        if from_seqnum is None:
            mode = 'w'
        else:
            mode = 'a'
        ofile = open(ofilename, mode, newline='')
        writer = csv.writer(ofile)
        if from_seqnum is None:
            writer.writerow(titles)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        infos = []
        for image in images:
            seqnum = image[1]
            if from_seqnum is not None and seqnum <= from_seqnum:
                continue
            dataId = dict(**dfilter, seqnum=seqnum)
            raw = butler.get('raw', dataId)
            header = raw.getInfo().getMetadata().toDict()
            if is_ccs:
                info = (header['DATE-OBS'], seqnum, header['IMGTYPE'],
                        header['TESTTYPE'], header['EXPTIME']
                        )  #, header['FILTER']]
            else:
                info = (header['DATE-OBS'], seqnum, str(header['OBJECT']),
                        header['IMGTYPE'], header['TESTTYPE'],
                        header['EXPTIME'])  #, header['FILTER']]
            #print("\t".join(info))
            infos.append(info)
        infos.sort(key=operator.itemgetter(1))
        if save_file:
            for info in infos:
                writer.writerow(info)
            ofile.close()
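
# Hypothetical call: catalog the ComCam images taken on 2020-02-17 and
# append any entries past sequence number 120 to an existing CSV.
list_cc_images('2020-02-17', from_seqnum=120, save_file=True)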
Example #9
import lsst.daf.persistence as dafPersistence
from astropy.io import fits
import numpy as np

DATA_DIR = "/local2/ph1jxm/GOTO/processedDataV16/DATA/rerun/outID"
butler = dafPersistence.Butler(DATA_DIR)

diffexp = butler.get("deepDiff_differenceExp",
                     visit=54480,
                     ccd=4,
                     immediate=True)
diasrc = butler.get("deepDiff_diaSrc", visit=54480, ccd=4, immediate=True)
calexp = butler.get("calexp", visit=54480, ccd=4, immediate=True)

diffexpArray = diffexp.getImage().getArray()
xs, ys = diasrc.getX(), diasrc.getY()
xs, ys = xs[~np.isnan(xs)], ys[~np.isnan(ys)]

np.random.seed(1)
real_x, real_y = np.random.rand(500) * calexp.getWidth() - 20, np.random.rand(
    500) * calexp.getHeight() + 21
match = 0
i = 1
for x, y in zip(xs, ys):
    x0, y0 = int(x) - 10, int(y) - 10
    x1, y1 = int(x) + 11, int(y) + 11
    subArray = diffexpArray[y0:y1, x0:x1]
    hdu = fits.PrimaryHDU(subArray)  # presumably written to disk in the full script
    dist = np.amin((real_x - x)**2 + (real_y - y)**2)
    if dist < 25:
        match += 1
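
# A hedged follow-up: report how many difference-image detections landed
# within 5 pixels (dist < 25 in squared-pixel units) of an injected position.
print("%d of %d diaSrc detections matched an injected source" % (match, len(xs)))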
Example #10
    def setUp(self):
        self.tempRoot = tempfile.mkdtemp()
        self.butler = dafPersist.Butler(root=self.tempRoot, mapper=MinMapper)
        self.butler.defineAlias(self.localTypeName,
                                self.localTypeNameIsAliasOf)
Example #11
                      default=10,
                      help='maxNumberOfPeaks',
                      type=int)

    opt, args = parser.parse_args()

    if len(args):
        parser.print_help()
        sys.exit(-1)

    if not opt.data:
        opt.data = os.path.join(os.environ['SUPRIME_DATA_DIR'], 'rerun',
                                opt.rerun)

    print('Data directory:', opt.data)
    butler = dafPersist.Butler(opt.data)
    dataId = dict(visit=opt.visit, ccd=opt.ccd)
    ps = PlotSequence('deb')

    sources = None
    if opt.sources:
        flags = 0
        sources = afwTable.SourceCatalog.readFits(opt.sources, opt.hdu, flags)
        print('Read sources from', opt.sources, ':', sources)

    makeplots(butler,
              dataId,
              ps,
              sources=sources,
              pids=opt.pid,
              minsize=opt.minsize,
Example #12
    def setUp(self):
        self.butler = dafPersist.Butler(os.path.join(ROOT, 'butlerAlias/data/input'),
                                        MinMapper)
        self.butler.defineAlias(self.datasetType, 'raw')
Example #13

import os
import shutil
import subprocess

from astropy.io import fits

import lsst.daf.persistence as dafPersist
import lsst.utils

dataDir = lsst.utils.getPackageDir('testdata_jointcal')
repo = os.path.join(dataDir, 'hsc')
sourcerepo = '/datasets/hsc/repo'

butlerTest = dafPersist.Butler(repo)
butlerSource = dafPersist.Butler(sourcerepo)

subset = butlerTest.subset('src')

for dataRef in subset:
    # Confirm that this src catalog actually exists in the repo.
    if not butlerTest.datasetExists('src', dataId=dataRef.dataId):
        continue

    sourceRawUri = butlerSource.getUri('raw', dataId=dataRef.dataId)
    testRawUri = butlerTest.getUri('raw', dataId=dataRef.dataId, write=True)

    testPath = os.path.dirname(testRawUri)
    if not os.path.isdir(testPath):
        os.makedirs(testPath)
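    # Hedged completion: the listing truncates here; given the shutil import
    # above, the raw file is presumably copied into the test repo like this:
    shutil.copy(sourceRawUri, testRawUri)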
Example #14
                        default='id',
                        help="id option to put in front "
                        "of the visit name. Could be 'selectId' or 'id'")
    args = parser.parse_args()

    if not os.path.exists(args.input):
        raise IOError("Input directory does not exists")
    if args.idopt not in ['selectId', 'id']:
        raise IOError("Option idopt must be 'selectid' or 'id'")

    os.makedirs(args.output, exist_ok=True)

    # Load the butler for this input directory
    datadir = args.input
    print("butler call on dir %s" % datadir)
    butler = dafPersist.Butler(datadir)

    if args.increment:
        print("INFO: Checking for dataIds that haven't been processed yet.")
        # Only keep visit that haven't been processed yet (no calexp data)
        dataids = compare_dataIds(get_dataIds(args.type),
                                  get_dataIds('calexp'))
    else:
        # Process all visits found in the input directories
        dataids = get_dataIds(args.type)
    print("INFO %i (new) dataIds to process found" % len(dataids))

    # Get the list of available filters
    filters = set([dataid['filter'] for dataid in dataids])
    print("INFO: Working on %i filters:" % len(filters), filters)
Example #15
    def __init__(self, dataRoot, butler_policy, butler_keys, logger):
        """Instantiate ButlerGet to be passed to ImageGetter."""
        self.butler = dafPersist.Butler(dataRoot)
        self.butler_policy = butler_policy
        self.butler_keys = butler_keys
        logger.debug("Instantiate ButlerGet.")
Example #16
                sub = afwImage.MaskedImageF(
                    newExp.getMaskedImage(),
                    lsstGeom.Box2I(lsstGeom.Point2I(_bbox.getMinX() - xmin,
                                                    _bbox.getMinY() - ymin),
                                   _bbox.getDimensions()),
                    afwImage.PARENT)
                #sub <<= exposure.getMaskedImage()
                sub.assign(exposure.getMaskedImage())

            image_fname = '%s_%s.fits' % (specObjID, band)
            newExp.setXY0(lsstGeom.Point2I(xmin, ymin))
            newExp.setWcs(wcs)
            newExp.writeFits(image_fname)

        except Exception as e:
            print(e.args)

if __name__ == '__main__':

    #butler = dafPersist.Butler('/gpfs02/HSC_DR/hsc_ssp/dr2/s18a/data/s18a_wide')
    butler = dafPersist.Butler('/gpfs02/HSC_DR/hsc_ssp/dr3/s20a/data/s20a_wide')
    skymap = butler.get('deepCoadd_skyMap')

    """
    src = butler.get('deepCoadd_meas', {'tract': 9813, 'patch': '4,4', 'filter': 'HSC-I'})
    print(src[0].getCoord())
    print(type(src[0].getCoord()))
    import sys
    sys.exit(1)
    """
    
    bands = ['HSC-G', 'HSC-R', 'HSC-I', 'HSC-Z', 'HSC-Y']                      

    f = open('cut_out.txt')
    for line in f:
        if line[0] == '#':
Example #17
def coaddImageCutFull(root, ra, dec, size, saveSrc=True, savePsf=True,
                      filt='HSC-I', prefix='hsc_coadd_cutout', verbose=True,
                      extraField1=None, extraValue1=None, butler=None,
                      visual=True, imgOnly=False):
    """Get the cutout around a location."""
    coaddData = "deepCoadd_calexp"
    pipeNew = True

    # Get the SkyMap of the database
    if butler is None:
        try:
            butler = dafPersist.Butler(root)
            if verbose:
                print(SEP)
                print("## Load in the Butler")
        except Exception:
            print(WAR)
            print('## Cannot load the correct Butler!')
    skyMap = butler.get("deepCoadd_skyMap", immediate=True)

    # (Ra, Dec) Pair for the center
    raDec = afwCoord.Coord(ra*afwGeom.degrees, dec*afwGeom.degrees)
    # [Ra, Dec] list
    raList, decList = getCircleRaDec(ra, dec, size)
    points = [afwGeom.Point2D(x, y) for x, y in zip(raList, decList)]
    raDecList = [afwCoord.IcrsCoord(p) for p in points]

    # Expected size and center position
    dimExpect = int(2 * size + 1)
    cenExpect = (dimExpect/2.0, dimExpect/2.0)
    sizeExpect = int(dimExpect ** 2)
    # Get the half size of the image in degree
    sizeDegree = size * 0.168 / 3600.0

    # Verbose
    if verbose:
        print(SEP)
        print(" Input Ra, Dec: %10.5f, %10.5f" % (ra, dec))
        print(" Cutout size is expected to be %d x %d" % (dimExpect, dimExpect))

    # Create empty arrays
    imgEmpty = np.empty((dimExpect, dimExpect), dtype="float")
    imgEmpty.fill(np.nan)
    if not imgOnly:
        mskEmpty = np.empty((dimExpect, dimExpect), dtype="uint8")
        varEmpty = np.empty((dimExpect, dimExpect), dtype="float")
        detEmpty = np.empty((dimExpect, dimExpect), dtype="float")
        mskEmpty.fill(0)  # uint8 cannot hold NaN; use 0 for the empty mask
        varEmpty.fill(np.nan)
        detEmpty.fill(np.nan)

    # Figure out the area we want, and read the data.
    # For coadds the WCS is the same in all bands,
    # but the code handles the general case
    # Start by finding the tract and patch
    matches = skyMap.findTractPatchList(raDecList)
    tractList, patchList = getTractPatchList(matches)
    nPatch = len(patchList)
    if verbose:
        print "### Will deal with %d patches" % nPatch
    # Prefix of the output file
    outPre = prefix + '_' + filt + '_full'

    newX = []
    newY = []
    boxX = []
    boxY = []
    boxSize = []
    #
    trList = []
    paList = []
    zpList = []
    #
    imgArr = []
    mskArr = []
    varArr = []
    detArr = []
    psfArr = []
    #
    srcArr = []
    refArr = []
    forceArr = []

    # Go through all these images
    for j in range(nPatch):
        # Tract, patch
        tract, patch = tractList[j], patchList[j]
        print(SEP)
        print("### Dealing with %d - %s" % (tract, patch))
        print(SEP)
        # Check if the coordinate is available in all three bands.
        try:
            # Get the coadded exposure
            coadd = butler.get(coaddData, tract=tract,
                               patch=patch, filter=filt,
                               immediate=True)
        except Exception:
            print(WAR)
            print(" No data is available in %d - %s" % (tract, patch))
            print("#########################################################")
            print(WAR)
        else:
            # Get the WCS information
            wcs = coadd.getWcs()
            # Check if cdMatrix has been assigned
            cdExist = 'cdMatrix' in locals()
            if not cdExist:
                # Get the CD Matrix of the WCS
                cdMatrix = wcs.getCDMatrix()
                # Get the pixel size in arcsec
                pixScale = wcs.pixelScale().asDegrees() * 3600.0
            # Convert the central coordinate from Ra,Dec to pixel unit
            pixel = wcs.skyToPixel(raDec)
            pixel = afwGeom.Point2I(pixel)
            # Define the bounding box for the central pixel
            bbox = afwGeom.Box2I(pixel, pixel)
            # Grow the bounding box to the desired size
            bbox.grow(int(size))
            xOri, yOri = bbox.getBegin()
            # Compare to the coadd image, and clip
            bbox.clip(coadd.getBBox(afwImage.PARENT))
            # Get the masked image
            try:
                subImage = afwImage.ExposureF(coadd, bbox,
                                              afwImage.PARENT)
            except Exception:
                print(WAR)
                print('### SOMETHING IS WRONG WITH THIS BOUNDING BOX !!')
                print("    %d -- %s -- %s " % (tract, patch, filt))
                print("    Bounding Box Size: %d" % (bbox.getWidth() *
                                                     bbox.getHeight()))
            else:
                # Extract the image array
                imgArr.append(subImage.getMaskedImage().getImage().getArray())

                if not imgOnly:
                    # Extract the detect mask array
                    mskDet = getCoaddMskPlane(subImage, 'DETECTED')
                    detArr.append(mskDet.getArray())
                    # Extract the variance array
                    imgVar = subImage.getMaskedImage().getVariance().getArray()
                    varArr.append(imgVar)

                    # Extract the bad mask array
                    mskBad = getCoaddBadMsk(subImage, pipeNew=pipeNew)
                    mskArr.append(mskBad.getArray())

                # Save the width of the BBox
                boxX.append(bbox.getWidth())
                # Save the height of the BBox
                boxY.append(bbox.getHeight())
                # Save the size of the BBox in unit of pixels
                boxSize.append(bbox.getWidth() * bbox.getHeight())
                # New X, Y origin coordinates
                newX.append(bbox.getBeginX() - xOri)
                newY.append(bbox.getBeginY() - yOri)
                # Tract, Patch
                trList.append(tract)
                paList.append(patch)
                # Photometric zeropoint
                zpList.append(2.5 * np.log10(
                              coadd.getCalib().getFluxMag0()[0]))
                # If necessary, save the psf images
                if savePsf and (not imgOnly):
                    psfImg = getCoaddPsfImage(coadd, raDec)
                    psfArr.append(psfImg)
                # Get the new (X,Y) coordinate of the galaxy center
                newCenExist = 'newCenX' in locals() and 'newCenY' in locals()
                if not newCenExist:
                    subWcs = subImage.getWcs()
                    newCenX, newCenY = subWcs.skyToPixel(raDec)
                    newCenX = newCenX - xOri
                    newCenY = newCenY - yOri
Example #18
            yref.append(point.getY())
        return xref, yref, ref_cat

def get_seps(src_cat, calexp, ref_cat, mag_cut=22):
    src_mags \
        = calexp.getCalib().getMagnitude(src_cat['slot_ModelFlux_instFlux'])
    mag_sel = np.where(src_mags < mag_cut)
    src = SkyCoord(ra=src_cat['coord_ra'][mag_sel],
                   dec=src_cat['coord_dec'][mag_sel], unit='rad')
    ref = SkyCoord(ra=ref_cat['coord_ra'], dec=ref_cat['coord_dec'], unit='rad')
    _, dist, _ = src.match_to_catalog_sky(ref)
    return dist.milliarcsecond

if __name__ == '__main__':
    import sys
    butler = dp.Butler('/global/cscratch1/sd/jchiang8/desc/Run1.2p_analysis/output_2018-10-04/rerun/jchiang/w_2018_39')
    ref_cat = RefCat(butler)
    visit, raft, sensor = sys.argv[1:4]
    dataId = dict(visit=int(visit), raftName=raft, detectorName=sensor)
    calexp = butler.get('calexp', dataId=dataId)
    src = butler.get('src', dataId=dataId)
    xref, yref, my_ref_cat = ref_cat.get_pixel_coords(dataId)

    show_mask = False
    show_mask = True
    colors = default_colors if show_mask else {}
    fig = plt.figure(figsize=(18, 7.5))
    fig.add_subplot(1, 2, 1)
    display_calexp(calexp, colors=colors, percentiles=(0, 99.95))
    xvals, yvals, src_cat \
        = overlay_sources(src, calexp, ref_pix_coords=(xref, yref))
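
    # A hedged follow-up: summarize the astrometric offsets of the matched
    # sources with get_seps defined above (numpy is assumed imported as np).
    seps = get_seps(src_cat, calexp, my_ref_cat)
    print('median astrometric offset: %.1f mas' % np.median(seps))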
Example #19
def returnMatchTable(rootDir,
                     visit,
                     ccdList,
                     outfile=None,
                     fakeCat=None,
                     overwrite=False,
                     filt=None,
                     tol=1.0,
                     pixMatch=False,
                     multiband=False,
                     reffMatch=False,
                     pix=0.168,
                     multijobs=1,
                     includeMissing=True,
                     minRad=None,
                     raCol='RA',
                     decCol='Dec'):
    """
    Driver (main function) for return match to fakes.

    INPUT: rootDir = rerun directory
           visit = visit id (int) (or tracts)
           ccdList = list of ccds to look at (or patches)
           outfile = output file for the matched table,
                     None means no output written
           fakeCat = fake catalog to match to,
                     None means the fake sources are just
                     extracted from the header of the CCDs based on
                     position but no matching is done
           overwrite = whether to overwrite the existing output file,
                       default is False
           pixMatch = do pixel matching instead of ra/dec matching
                      even if there is a catalog supplied
           multiband = whether to match to forced photometry catalogs
                       from the multiband process
           reffMatch = whether to match fake sources within tol x Reff
                       instead of a fixed pixel radius (RA, Dec match only)
    OUTPUT: returns an astropy.table.Table with all the entries
            from the source catalog for objects which match in pixel
            position to the fake sources
    """
    butler = dafPersist.Butler(rootDir)
    slist = None

    if multijobs > 1:
        try:
            from joblib import Parallel, delayed
            mlist = Parallel(n_jobs=multijobs)(
                delayed(returnMatchSingle)(butler,
                                           None,
                                           visit,
                                           ccd,
                                           filt=filt,
                                           fakeCat=fakeCat,
                                           includeMissing=includeMissing,
                                           pixMatch=pixMatch,
                                           reffMatch=reffMatch,
                                           tol=tol,
                                           multiband=multiband,
                                           minRad=minRad,
                                           pix=pix,
                                           decCol=decCol,
                                           raCol=raCol) for ccd in ccdList)
            for m in mlist:
                if m is not None:
                    if slist is None:
                        slist = m.copy(True)
                    else:
                        slist.extend(m, True)
                    del m
        except ImportError:
            print("# Can not import joblib, stop multiprocessing!")
            for ccd in ccdList:
                slist = returnMatchSingle(butler,
                                          slist,
                                          visit,
                                          ccd,
                                          filt=filt,
                                          fakeCat=fakeCat,
                                          includeMissing=includeMissing,
                                          pixMatch=pixMatch,
                                          reffMatch=reffMatch,
                                          tol=tol,
                                          pix=pix,
                                          multiband=multiband,
                                          minRad=minRad,
                                          raCol=raCol,
                                          decCol=decCol)
    else:
        for ccd in ccdList:
            slist = returnMatchSingle(butler,
                                      slist,
                                      visit,
                                      ccd,
                                      filt=filt,
                                      fakeCat=fakeCat,
                                      includeMissing=includeMissing,
                                      pixMatch=pixMatch,
                                      reffMatch=reffMatch,
                                      tol=tol,
                                      pix=pix,
                                      multiband=multiband,
                                      minRad=minRad,
                                      raCol=raCol,
                                      decCol=decCol)

    if slist is None:
        print("Returns no match....!")

        return None
    else:
        astroTable = getAstroTable(slist, mags=True)

        if fakeCat is not None:
            astroTable = matchToFakeCatalog(astroTable, fakeCat)

        if outfile is not None:
            try:
                astroTable.write(outfile + '.fits',
                                 format='fits',
                                 overwrite=overwrite)
            except IOError:
                print("Try setting the option -w to overwrite the file.")
                raise

        return astroTable
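
# A minimal usage sketch (rerun path, visit, and CCD list are hypothetical).
matched = returnMatchTable('/path/to/rerun', 1228, list(range(104)),
                           outfile='fake_matches_1228', overwrite=True)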
Example #20
def tractFindVisits(rerun,
                    tract,
                    filter='HSC-I',
                    patch=None,
                    dataDir='/lustre/Subaru/SSP/rerun/'):
    """Return the list of input Visits to coadd."""
    butler = dafPersist.Butler(os.path.join(dataDir, rerun))

    pipeVersion = dafPersist.eupsVersions.EupsVersions().versions['hscPipe']
    if StrictVersion(pipeVersion) >= StrictVersion('3.9.0'):
        coaddData = "deepCoadd_calexp"
    else:
        coaddData = "deepCoadd"

    if patch != '':
        """
        Only 1 Patch is required
        """
        coadd = butler.get(coaddData,
                           dataId={
                               "tract": tract,
                               "patch": patch,
                               "filter": filter
                           },
                           immediate=True)
        ccdInputs = coadd.getInfo().getCoaddInputs().ccds
        visits = np.unique(ccdInputs.get("visit"))
        print "\n# Visits for Tract=%d Filter=%s Patch=%s\n" % (tract, filter,
                                                                patch)
    else:
        """
        Go through all the possible patches
        """
        visits = np.empty([0], dtype=int)
        for pa in itertools.combinations_with_replacement((np.arange(9)), 2):
            patch = str(pa[0]) + ',' + str(pa[1])
            try:
                coadd = butler.get(coaddData,
                                   dataId={
                                       "tract": tract,
                                       "patch": patch,
                                       "filter": filter
                                   },
                                   immediate=True)
            except Exception:
                continue
            ccdInputs = coadd.getInfo().getCoaddInputs().ccds
            vTemp = np.unique(ccdInputs.get("visit"))
            visits = np.unique(np.append(visits, vTemp))

        for pa in itertools.combinations_with_replacement((np.arange(9)), 2):
            patch = str(pa[1]) + ',' + str(pa[0])
            try:
                coadd = butler.get(coaddData,
                                   dataId={
                                       "tract": tract,
                                       "patch": patch,
                                       "filter": filter
                                   },
                                   immediate=True)
            except Exception:
                continue
            ccdInputs = coadd.getInfo().getCoaddInputs().ccds
            vTemp = np.unique(ccdInputs.get("visit"))
            visits = np.unique(np.append(visits, vTemp))

        print "\n# Input visits for Tract=%d Filter=%s\n" % (tract, filter)

    line = ''
    print " # Input CCDs includes %d Visits\n" % len(visits)
    for vv in visits:
        line = line + str(vv) + '^'

    print(line[:-1] + '\n')

    return visits
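
# Hypothetical call: list the visits feeding the i-band coadd of one patch.
inputVisits = tractFindVisits('s18a_wide', 9376, filter='HSC-I', patch='4,4')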
Example #21
    def testNonexistentValue(self):
        butler = dafPersist.Butler(
            outputs={'mode': 'rw', 'root': self.tmpRoot, 'mapper': ImgMapper})
        ButlerSubsetTestCase.registerAliases(butler)
        subset = butler.subset(self.calexpTypeName, skyTile=2349023905239)
        self.assertEqual(len(subset), 0)
Example #22
def sfp_validation_plots(repo,
                         visit,
                         outdir='.',
                         flux_type='base_PsfFlux',
                         opsim_db=None,
                         figsize=(12, 10),
                         max_offset=0.1,
                         sn_min=150):
    """
    Create the single-frame validation plots.

    Parameters
    ----------
    repo: str
        Data repository containing calexps.
    visit: int
        Visit number.
    outdir: str ['.']
        Directory to contain output files.
    flux_type: str ['base_PsfFlux']
        Flux column to use for selecting well-measured point sources.
    opsim_db: str [None]
        OpSim db file containing pointing information.  This is used
        to get the pointing direction for the ref cat selection and
        the predicted five sigma depth for the visit.  If None, then
        the pointing direction will be inferred from the calexps.
    figsize: (float, float) [(12, 10)]
        Size of the figure in inches.
    max_offset: float [0.1]
        Maximum offset, in arcsec, for positional matching of point
        sources to ref cat stars.
    sn_min: float [150]
        Minimum signal-to-noise cut on psfFlux/psfFluxErr.

    Returns
    -------
    pandas.DataFrame containing the visit-level metrics:
        (median astrometric offset, median delta magnitude, median T value,
         extrapolated five sigma depth)
    """
    butler = dp.Butler(repo)
    try:
        try:
            band = list(butler.subset('src', visit=visit))[0].dataId['filter']
        except dp.butlerExceptions.NoResults:
            band = list(butler.subset('src', expId=visit))[0].dataId['filter']
    except Exception as eobj:
        print('visit:', visit)
        print(eobj)
        raise
    center_radec = get_center_radec(butler, visit, opsim_db)
    ref_cat = get_ref_cat(butler, visit, center_radec)

    os.makedirs(outdir, exist_ok=True)
    pickle_file = os.path.join(outdir, f'sfp_validation_v{visit}-{band}.pkl')

    if not os.path.isfile(pickle_file):
        df = visit_ptsrc_matches(butler,
                                 visit,
                                 center_radec,
                                 max_offset=max_offset)
        df.to_pickle(pickle_file)
    else:
        df = pd.read_pickle(pickle_file)

    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(2, 2, 1)

    coord_ra = np.array([_.asRadians() for _ in df['coord_ra']])
    coord_dec = np.array([_.asRadians() for _ in df['coord_dec']])
    dra = np.degrees(
        (df['ref_ra'] - coord_ra) * np.cos(df['ref_dec'])) * 3600 * 1000
    ddec = np.degrees((df['ref_dec'] - coord_dec)) * 3600 * 1000

    max_offset *= 1e3 * 1.2
    xy_range = (-max_offset, max_offset)
    plt.hexbin(dra, ddec, mincnt=1)
    plt.xlabel('RA offset (mas)')
    plt.ylabel('Dec offset (mas)')
    plt.xlim(*xy_range)
    plt.ylim(*xy_range)

    nullfmt = NullFormatter()

    ax_ra = ax.twinx()
    ax_ra.yaxis.set_major_formatter(nullfmt)
    ax_ra.yaxis.set_ticks([])
    bins, _, _ = plt.hist(dra,
                          bins=50,
                          histtype='step',
                          range=xy_range,
                          density=True,
                          color='red')
    ax_ra.set_ylim(0, 2.3 * np.max(bins))

    ax_dec = ax.twiny()
    ax_dec.xaxis.set_major_formatter(nullfmt)
    ax_dec.xaxis.set_ticks([])
    bins, _, _ = plt.hist(ddec,
                          bins=50,
                          histtype='step',
                          range=xy_range,
                          density=True,
                          color='red',
                          orientation='horizontal')
    ax_dec.set_xlim(0, 2.3 * np.max(bins))

    median_offset = np.median(df['offset'])
    plt.annotate(f'{median_offset:.1f} mas median offset', (0.5, 0.95),
                 xycoords='axes fraction',
                 horizontalalignment='left')

    plt.title(f'v{visit}-{band}')
    plt.colorbar()

    fig.add_subplot(2, 2, 2)
    bins = 20
    delta_mag = df['src_mag'] - df['ref_mag']
    dmag_med = np.nanmedian(delta_mag)
    ymin, ymax = dmag_med - 0.5, dmag_med + 0.5
    plt.hexbin(df['ref_mag'], delta_mag, mincnt=1)
    plot_binned_stats(df['ref_mag'],
                      delta_mag,
                      x_range=plt.axis()[:2],
                      bins=20)
    plt.xlabel('ref_mag')
    plt.ylabel(f'{flux_type}_mag - ref_mag')
    plt.ylim(ymin, ymax)
    plt.title(f'v{visit}-{band}')
    plt.colorbar()
    xmin, xmax = plt.axis()[:2]

    fig.add_subplot(2, 2, 3)
    T = (df['base_SdssShape_xx'] + df['base_SdssShape_yy']) * 0.2**2
    tmed = np.nanmedian(T)
    ymin, ymax = tmed - 0.1, tmed + 0.1
    plt.hexbin(df['ref_mag'], T, mincnt=1, extent=(xmin, xmax, ymin, ymax))
    plot_binned_stats(df['ref_mag'], T, x_range=plt.axis()[:2], bins=20)
    plt.xlabel('ref_mag')
    plt.ylabel('T (arcsec**2)')
    plt.ylim(ymin, ymax)
    plt.title(f'v{visit}-{band}')
    plt.colorbar()

    ax1 = fig.add_subplot(2, 2, 4)
    x_range = (12, 26)
    plot_detection_efficiency(butler, visit, df, ref_cat, x_range=x_range)
    plt.title(f'v{visit}-{band}')

    ax2 = ax1.twinx()
    ax2.set_ylabel('S/N', color='red')
    snr = df['base_PsfFlux_instFlux'] / df['base_PsfFlux_instFluxErr']
    ref_mags, SNR_values, _ = plot_binned_stats(df['ref_mag'],
                                                snr,
                                                x_range=x_range,
                                                bins=20,
                                                color='red')
    m5, mag_func = extrapolate_nsigma(ref_mags, SNR_values, nsigma=5)
    plt.xlim(*x_range)

    plt.yscale('log')
    ymin, ymax = 1, plt.axis()[-1]
    plt.ylim(ymin, ymax)
    plt.axhline(5, linestyle=':', color='red')
    yvals = np.logspace(np.log10(ymin), np.log10(ymax), 50)
    plt.plot(mag_func(np.log10(yvals)), yvals, linestyle=':', color='red')
    if opsim_db is not None:
        plt.axvline(get_five_sigma_depth(opsim_db, visit),
                    linestyle='--',
                    color='red')

    plt.tight_layout()
    outfile = os.path.join(outdir, f'sfp_validation_v{visit}-{band}.png')
    plt.savefig(outfile)

    # Make plot of psf_mag - calib_mag distribution.
    fig = plt.figure(figsize=(6, 4))
    dmag_calib_median = psf_mag_check(repo, visit, sn_min=sn_min)
    plt.title(f'v{visit}-{band}')
    outfile = os.path.join(outdir, f'delta_mag_calib_v{visit}-{band}.png')
    plt.savefig(outfile)

    # Make plot of psf_mag - ref_mag distribution.
    my_df = df.query('base_PsfFlux_instFlux/base_PsfFlux_instFluxErr'
                     f' > {sn_min}')
    fig = plt.figure(figsize=(6, 4))
    dmag_ref_median = plot_dmags(my_df['src_mag'],
                                 my_df['ref_mag'],
                                 sn_min=sn_min)
    plt.title(f'v{visit}-{band}')
    outfile = os.path.join(outdir, f'delta_mag_ref_v{visit}-{band}.png')
    plt.savefig(outfile)

    # Make psf whisker plot.
    psf_whisker_plot(butler, visit)
    outfile = os.path.join(outdir, f'psf_whisker_plot_v{visit}-{band}.png')
    plt.savefig(outfile)

    df = pd.DataFrame(data=dict(visit=[visit],
                                ast_offset=[median_offset],
                                dmag_ref_median=[dmag_ref_median],
                                dmag_calib_median=[dmag_calib_median],
                                T_median=[tmed],
                                m5=[m5],
                                band=[band]))
    metrics_file = os.path.join(outdir, f'sfp_metrics_v{visit}-{band}.pkl')
    df.to_pickle(metrics_file)

    return df
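
# Hedged usage sketch (repo path and visit are hypothetical): make the
# single-frame validation plots and collect the per-visit metrics.
metrics = sfp_validation_plots('/path/to/repo', 185783, outdir='sfp_plots')
print(metrics)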
Example #23
def reportPatchesWithImages(butler, visits=None, ccdkey='sensor'):

    # 'butler' is a repository path here; create a butler object
    # associated with the output directory
    butler = dafPersist.Butler(butler)

    # Get the skymap
    skyMap = butler.get("deepCoadd_skyMap")

    # Get the calexp metadata
    keys = sorted(butler.getKeys("calexp").keys())
    metadata = butler.queryMetadata("calexp", format=keys)

    # Create a list of available dataids
    dataids = [
        dict(zip(keys,
                 list(v) if not isinstance(v, list) else v)) for v in metadata
    ]

    # Organize the dataids by visit
    vdataids = organize_by_visit(dataids, visits=visits)

    if visits is None or len(visits) != 1:
        # Get the ccds that will be used to compute the visit corner
        # coordinates; this depends on the instrument, so it cannot be
        # hardcoded
        ccds = get_visit_corners(butler,
                                 vdataids[list(vdataids)[0]],
                                 getccds=True,
                                 ccdkey=ccdkey)

        # Get the corners coordinates for all visits
        allcoords = []
        for ii, vdataid in enumerate(vdataids):
            print("Running on visit %03d / %i" % (ii + 1, len(vdataids)))
            allcoords.append(
                get_visit_corners(butler,
                                  vdataids[vdataid],
                                  ccds=ccds,
                                  ccdkey=ccdkey))

        # Get the tract/patch list in which the visits are
        alltps = []
        for vdataid, vcoords in zip(vdataids, allcoords):
            alltps.extend(
                get_tps(skyMap, vcoords, vdataids[vdataid][0]['filter']))
    else:
        # Only one visit given, so run the code on all sensor/ccd
        # Get the corners coordinates for all visits
        visit = int(visits[0])
        print("%i dataIds loaded for visit" % len(vdataids[visit]), visit)
        allcoords = get_dataid_corners(butler, vdataids[visit], ccdkey=ccdkey)

        # Get the tract/patch list in which the sensor are
        alltps = []
        for coords in allcoords:
            alltps.extend(get_tps(skyMap, coords,
                                  vdataids[visit][0]['filter']))

    # Re-organize the tract and patch list into a dictionary
    tps = {}
    for tp in alltps:
        if tp[0] not in tps:
            tps[tp[0]] = []
        if tp[1] not in tps[tp[0]]:
            tps[tp[0]].append(tp[1])

    return tps
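
# A minimal usage sketch (repo path and visit are hypothetical).
tps = reportPatchesWithImages('/path/to/output/repo', visits=[185783])
for tract, patch_list in tps.items():
    print(tract, sorted(patch_list))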
Example #24
def test():
    butler = dafPersist.Butler(inputs='DATA/rerun/coaddForcedPhot')

    coadd = butler.get("deepCoadd_calexp",
                       tract=0,
                       patch='1,1',
                       filter='HSC-I')

    rSources = butler.get('deepCoadd_forced_src', {
        'filter': 'HSC-R',
        'tract': 0,
        'patch': '1,1'
    })
    iSources = butler.get('deepCoadd_forced_src', {
        'filter': 'HSC-I',
        'tract': 0,
        'patch': '1,1'
    })

    rCoaddCalexp = butler.get('deepCoadd_calexp', {
        'filter': 'HSC-R',
        'tract': 0,
        'patch': '1,1'
    })
    rCoaddCalib = rCoaddCalexp.getCalib()
    iCoaddCalexp = butler.get('deepCoadd_calexp', {
        'filter': 'HSC-I',
        'tract': 0,
        'patch': '1,1'
    })
    iCoaddCalib = iCoaddCalexp.getCalib()

    ##############################################
    # unique_id = str(uuid.uuid4())
    # iCoaddCalib.writeFits(unique_id+".fits")
    # file_name = "patch_1_1"+".fits"
    # coadd.writeFits(file_name)
    # new_link = post_image(file_name)
    # os.remove(file_name)
    display = afwDisplay.getDisplay()
    display.mtv(coadd)
    display.setMaskTransparency(100)
    display.scale("asinh", -1, 30)
    ###################################################

    rCoaddCalib.setThrowOnNegativeFlux(False)
    iCoaddCalib.setThrowOnNegativeFlux(False)

    # print(iSources.getSchema())

    # rMags = rCoaddCalib.getMagnitude(rSources['base_PsfFlux_instFlux'])
    iMags = iCoaddCalib.getMagnitude(iSources['base_PsfFlux_flux'])
    maxMag = max(iMags)

    isDeblended = rSources['deblend_nChild'] == 0

    refTable = butler.get('deepCoadd_ref', {
        'filter': 'HSC-R^HSC-I',
        'tract': 0,
        'patch': '1,1'
    })

    inInnerRegions = refTable['detect_isPatchInner'] & refTable[
        'detect_isTractInner']

    isSkyObject = refTable['merge_peak_sky']

    isPrimary = refTable['detect_isPrimary']

    #rMags[isPrimary]
    #iMags[isPrimary]

    isStellar = iSources['base_ClassificationExtendedness_value'] < 1.

    isNotStellar = iSources['base_ClassificationExtendedness_value'] >= 1.

    isGoodFlux = ~iSources['base_PsfFlux_flag']

    selected = isPrimary & isStellar & isGoodFlux

    for src in iSources[selected]:
        # iMag = iCoaddCalib.getMagnitude(src['base_PsfFlux_flux'])
        # print(iMag)
        # if src.getX() > 5900 and src.getX() < 6000 and src.getY() > 6000 and src.getY() < 6100:
        #     print(str(src.getX())+ ", "+str(src.getY()))
        #     display.dot("o", src.getX(), src.getY(), size=10, ctype='orange')
        #
        # if iMag < maxMag/2.75:
        #     print(str(src.getX())+ ", "+str(src.getY()))
        display.dot("o", src.getX(), src.getY(), size=20, ctype='green')
Example #25
    @classmethod
    def setUpClass(cls):
        # For lsstSim-specific reasons, we need to specify the raft and sensor
        dataId = dict(visit=840, raft='2,2', sensor='1,1')
        butler = dafPersistence.Butler(InputDir)
        cls.exposure = butler.get('eimage', dataId=dataId)
        cls.visit_info = cls.exposure.getInfo().getVisitInfo()
Example #26
def main():

    butler = dafPersist.Butler(inputs='DATA/rerun/coaddForcedPhot')

    patches = ['0,1', '1,0', '1,1', '1,2', '2,0', '2,1', '2,2']

    id = 0

    for patch in patches:
        coadd = butler.get("deepCoadd_calexp",
                           tract=0,
                           patch=patch,
                           filter='HSC-I')

        rSources = butler.get('deepCoadd_forced_src', {
            'filter': 'HSC-R',
            'tract': 0,
            'patch': patch
        })
        iSources = butler.get('deepCoadd_forced_src', {
            'filter': 'HSC-I',
            'tract': 0,
            'patch': patch
        })

        rCoaddCalexp = butler.get('deepCoadd_calexp', {
            'filter': 'HSC-R',
            'tract': 0,
            'patch': patch
        })
        rCoaddCalib = rCoaddCalexp.getCalib()
        iCoaddCalexp = butler.get('deepCoadd_calexp', {
            'filter': 'HSC-I',
            'tract': 0,
            'patch': patch
        })
        iCoaddCalib = iCoaddCalexp.getCalib()

        ###############################################
        result = [x.strip() for x in patch.split(',')]
        file_name = "patch_" + result[0] + "_" + result[1] + ".fits"
        coadd.writeFits(file_name)

        # send new unique file to s3 and delete it from local storage
        new_link = post_image(file_name)
        # os.remove(file_name)

        # post_image_database(unique_id)
        ##################################################

        rCoaddCalib.setThrowOnNegativeFlux(False)
        iCoaddCalib.setThrowOnNegativeFlux(False)

        iMags = iCoaddCalib.getMagnitude(iSources['base_PsfFlux_flux'])
        maxMag = max(iMags)

        isDeblended = rSources['deblend_nChild'] == 0

        refTable = butler.get('deepCoadd_ref', {
            'filter': 'HSC-R^HSC-I',
            'tract': 0,
            'patch': patch
        })

        inInnerRegions = refTable['detect_isPatchInner'] & refTable[
            'detect_isTractInner']

        isSkyObject = refTable['merge_peak_sky']

        isPrimary = refTable['detect_isPrimary']

        isStellar = iSources['base_ClassificationExtendedness_value'] < 1.

        isNotStellar = iSources['base_ClassificationExtendedness_value'] >= 1.

        isGoodFlux = ~iSources['base_PsfFlux_flag']

        selected = isPrimary & isNotStellar & isGoodFlux

        for src in iSources[selected]:
            iMag = iCoaddCalib.getMagnitude(src['base_PsfFlux_flux'])
            # print(str(src.getX())+ ", "+str(src.getY()))
            if iMag < maxMag / 2.75:
                post_top_src_database(str(id), file_name, str(src.getX()),
                                      str(src.getY()), 'EXT')
                id += 1

        selected2 = isPrimary & isStellar & isGoodFlux

        for src in iSources[selected2]:
            iMag = iCoaddCalib.getMagnitude(src['base_PsfFlux_flux'])
            # print(str(src.getX())+ ", "+str(src.getY()))
            if iMag < maxMag / 2.75:
                post_top_src_database(str(id), file_name, str(src.getX()),
                                      str(src.getY()), 'STAR')
                id += 1

        print(id)
Example #27
def mosaicDIASources(repo_dir,
                     visitid,
                     ccdnum=10,
                     cutout_size=30,
                     template_catalog=None,
                     xnear=None,
                     ynear=None,
                     sourceIds=None,
                     gridSpec=[7, 4],
                     dipoleFlag='ip_diffim_ClassificationDipole_value'):
    import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.style.use('ggplot')
    import matplotlib.gridspec as gridspec
    import lsst.daf.persistence as dafPersist

    #
    # This matches up which exposures were differenced against which templates,
    # and is purely specific to this particular set of data.
    if template_catalog is None:
        template_catalog = {
            197790: [197802, 198372, 198376, 198380, 198384],
            197662: [198668, 199009, 199021, 199033],
            197408: [197400, 197404, 197412],
            197384: [197388, 197392],
            197371: [197367, 197375, 197379]
        }
    # Need to invert this to template_visit_catalog[exposure] = template
    template_visit_catalog = {}
    for templateid, visits in template_catalog.items():
        for visit in visits:
            template_visit_catalog[visit] = templateid

    def make_cutout(img, x, y, cutout_size=20):
        return img[(x - cutout_size // 2):(x + cutout_size // 2),
                   (y - cutout_size // 2):(y + cutout_size // 2)]

    def group_items(items, group_length):
        for n in range(0, len(items), group_length):
            yield items[n:(n + group_length)]

    b = dafPersist.Butler(repo_dir)

    template_visit = template_visit_catalog[visitid]
    templateExposure = b.get("calexp",
                             visit=template_visit,
                             ccdnum=ccdnum,
                             immediate=True)
    template_img, _, _ = templateExposure.getMaskedImage().getArrays()
    template_wcs = templateExposure.getWcs()

    sourceExposure = b.get("calexp",
                           visit=visitid,
                           ccdnum=ccdnum,
                           immediate=True)
    source_img, _, _ = sourceExposure.getMaskedImage().getArrays()

    subtractedExposure = b.get("deepDiff_differenceExp",
                               visit=visitid,
                               ccdnum=ccdnum,
                               immediate=True)
    subtracted_img, _, _ = subtractedExposure.getMaskedImage().getArrays()
    subtracted_wcs = subtractedExposure.getWcs()

    diaSources = b.get("deepDiff_diaSrc",
                       visit=visitid,
                       ccdnum=ccdnum,
                       immediate=True)

    masked_img = subtractedExposure.getMaskedImage()
    img_arr, mask_arr, var_arr = masked_img.getArrays()
    z1, z2 = zscale_image(img_arr)

    top_level_grid = gridspec.GridSpec(gridSpec[0], gridSpec[1])

    source_ind = 0
    for source_n, source in enumerate(diaSources):

        source_id = source.getId()
        if sourceIds is not None and not np.in1d(source_id, sourceIds)[0]:
            continue

        source_x = source.get("ip_diffim_NaiveDipoleCentroid_x")
        source_y = source.get("ip_diffim_NaiveDipoleCentroid_y")
        if xnear is not None and not np.any(
                np.abs(source_x - xnear) <= cutout_size):
            continue
        if ynear is not None and not np.any(
                np.abs(source_y - ynear) <= cutout_size):
            continue

        #is_dipole = source.get("ip_diffim_ClassificationDipole_value") == 1
        dipoleLabel = ''
        if source.get(dipoleFlag) == 1:
            dipoleLabel = 'Dipole'
        if source.get("ip_diffim_DipoleFit_flag_classificationAttempted") == 1:
            dipoleLabel += ' *'
        template_xycoord = template_wcs.skyToPixel(
            subtracted_wcs.pixelToSky(source_x, source_y))
        cutouts = [
            make_cutout(template_img,
                        template_xycoord.getY(),
                        template_xycoord.getX(),
                        cutout_size=cutout_size),
            make_cutout(source_img,
                        source_y,
                        source_x,
                        cutout_size=cutout_size),
            make_cutout(subtracted_img,
                        source_y,
                        source_x,
                        cutout_size=cutout_size)
        ]

        try:
            subgrid = gridspec.GridSpecFromSubplotSpec(
                1, 3, subplot_spec=top_level_grid[source_ind], wspace=0)
        except IndexError:
            # No panels left in the top-level grid; skip remaining sources.
            continue
        for cutout_n, cutout in enumerate(cutouts):
            plt.subplot(subgrid[0, cutout_n])
            plt.imshow(cutout, vmin=z1, vmax=z2, cmap=plt.cm.gray)
            plt.gca().xaxis.set_ticklabels([])
            plt.gca().yaxis.set_ticklabels([])

        plt.subplot(subgrid[0, 0])
        source_ind += 1
        #if is_dipole:
        #print(source_n, source_id)
        plt.ylabel(str(source_n) + dipoleLabel)
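
    # A minimal usage sketch, assuming a Gen2 repo that holds calexp,
    # deepDiff_differenceExp, and deepDiff_diaSrc products for the visit
    # (the repo path below is hypothetical):
    #
    #   mosaicDIASources('/path/to/decam_rerun', visitid=197802, ccdnum=10,
    #                    cutout_size=30)
    #   plt.show()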
    ids = args.id
    verbose = args.verbose
    visit = args.visit

    if ids is not None:
        mat = re.search(r"visit=(\d+)", ids)
        if not mat:
            print("Please specify a visit", file=sys.stderr)
            sys.exit(1)
        visit = int(mat.group(1))

        if args.visit is not None and visit != args.visit:
            print("Please specify either --id or --visit (or be consistent)", file=sys.stderr)
            sys.exit(1)

    butler = dafPersist.Butler(args.input)
    #
    # Lookup the amp names
    #
    camera = butler.get("camera")
    det = list(camera)[0]
    ampNames = [a.getName() for a in det]

    if visit is None:
        visit = butler.queryMetadata("raw", ["visit"])[0]

    dataId = dict(visit=visit)
    #
    # The butler won't look up the remaining keys of a dataId when a
    # channel is also specified, so keep the dataId to the visit only.
    #
Example #29
def coaddImageCutout(root,
                     ra,
                     dec,
                     size,
                     saveMsk=True,
                     saveSrc=True,
                     filt='HSC-I',
                     prefix='hsc_coadd_cutout',
                     circleMatch=True,
                     verbose=True,
                     extraField1=None,
                     extraValue1=None,
                     butler=None,
                     no_bright_object=False):
    """Cutout coadd image around a RA, DEC."""
    # No longer support hscPipe < 4
    coaddData = "deepCoadd_calexp"

    # Get the SkyMap of the database
    if butler is None:
        try:
            butler = dafPersist.Butler(root)
            if verbose:
                print(SEP)
                print("## Load in the Butler ...")
        except Exception:
            raise Exception("### Cannot load the Butler")
    skyMap = butler.get("deepCoadd_skyMap", immediate=True)

    # Get the expected cutout size
    dimExpect = (2 * size + 1)
    sizeExpect = dimExpect**2
    # Cutout half-size in degrees (0.168 arcsec/pixel is the HSC pixel scale)
    sizeDeg = size * 0.168 / 3600.0

    # Verbose
    if verbose:
        print(SEP)
        print(" Input Ra, Dec: %10.5f, %10.5f" % (ra, dec))
        print(" Cutout size is expected to be %d x %d" %
              (dimExpect, dimExpect))

    # First, search for the central (Ra, Dec)
    # Define the Ra, Dec pair
    coord = afwCoord.IcrsCoord(ra * afwGeom.degrees, dec * afwGeom.degrees)

    # Search for overlapped tract, patch pairs
    matches = skyMap.findClosestTractPatchList([coord])
    # Number of matched tracts
    nTract = len(matches)
    # Number of matched patches
    nPatch = 0
    for tt in range(nTract):
        nPatch += len(matches[tt][1])
    if verbose:
        print("## Find %d possible matches !" % nPatch)

    matchCen = []
    for tract, patch in matches:

        # Get the (tract, patch) ID
        tractId = tract.getId()
        patchId = "%d,%d" % patch[0].getIndex()
        if verbose:
            print("## Choose (Tract, Patch) for center: %d, %s !" %
                  (tractId, patchId))
        matchCen.append((tractId, patchId))
        # Get the coadd images
        # Try to load the coadd Exposure; the skymap covers larger area than
        # available data, which will cause Butler to fail sometime
        try:
            coadd = butler.get(coaddData,
                               tract=tractId,
                               patch=patchId,
                               filter=filt,
                               immediate=True)
        except Exception as errMsg:
            print(WAR)
            print(" The desired coordinate is not available! ")
            print(WAR)
            print(errMsg)
            """ TODO """
            coaddFound = False
            noData = True
            partialCut = True
            continue
        else:
            """
            It's still possible that the matched location actually has no
            useful data (e.g. have been interpolated, or in the NO_DATA
            part of the patch)
            For this situation, no psf image can be generated !!
            """
            coaddFound = True
            # Get the Coadded PSF image
            psfImg = getCoaddPsfImage(coadd, coord, label=prefix)
            if psfImg is None:
                noData = True
                partialCut = True
                continue
            else:
                noData = False
            # Get the WCS information
            wcs = coadd.getWcs()
            # Convert the central coordinate from Ra,Dec to pixel unit
            pixel = wcs.skyToPixel(coord)
            pixel = afwGeom.Point2I(pixel)
            # Define the bounding box for the central pixel
            bbox = afwGeom.Box2I(pixel, pixel)
            # Grow the bounding box to the desired size
            bbox.grow(int(size))
            # Compare to the coadd image, and clip
            bbox.clip(coadd.getBBox(afwImage.PARENT))
            if bbox.isEmpty():
                noData = True
                partialCut = True
                continue
            else:
                if bbox.getArea() < sizeExpect:
                    partialCut = True
                    if verbose:
                        print("## Cut out image dimension " + "is : %d x %d " %
                              (bbox.getWidth(), bbox.getHeight()))
                else:
                    partialCut = False
            # Make a new ExposureF object for the cutout region
            subImage = afwImage.ExposureF(coadd, bbox, afwImage.PARENT)
            # Get the WCS
            subWcs = subImage.getWcs()
            # Get the central pixel coordinates on new subImage WCS
            newX, newY = subWcs.skyToPixel(coord)
            # Get the new origin pixel
            newOriX, newOriY = subImage.getImage().getXY0()
            newX = newX - newOriX
            newY = newY - newOriY
            # Get the header of the new subimage
            subHead = subImage.getMetadata()
            subHead.set('RA_CUT', ra)
            subHead.set('DEC_CUT', dec)
            subHead.set('NEW_X', newX)
            subHead.set('NEW_Y', newY)
            if partialCut:
                subHead.set('PARTIAL', 1)
            else:
                subHead.set('PARTIAL', 0)
            if (extraField1 is not None) and (extraValue1 is not None):
                subHead.set(extraField1, extraValue1)

            # Check whether data are available for the whole cutout region
            if partialCut:
                warnings.warn("## Only part of the region is available" +
                              " : %d, %s" % (tractId, patchId))
                outPre = (prefix + '_' + str(tractId) + '_' + patchId + '_' +
                          filt + '_cent')
            else:
                outPre = (prefix + '_' + str(tractId) + '_' + patchId + '_' +
                          filt + '_full')
            # Define the output file name
            outImg = outPre + '.fits'
            outPsf = outPre + '_psf.fits'
            # Save the cutout image to a new FITS file
            subImage.writeFits(outImg)
            # Save the PSF image
            psfImg.writeFits(outPsf)
            if saveMsk is True:
                # Get the "Bad" mask plane
                mskBad = getCoaddBadMsk(subImage,
                                        no_bright_object=no_bright_object)
                mskBad.writeFits(outPre + '_bad.fits')

            if saveSrc is True:

                # Get the coadd measurement catalog
                # (it is sometimes missing for a given tract/patch)
                try:
                    srcCat = butler.get('deepCoadd_meas',
                                        tract=tractId,
                                        patch=patchId,
                                        filter=filt,
                                        immediate=True,
                                        flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)
                    # Get the pixel coordinates for all objects
                    srcRa = np.array([s.get('coord').getRa().asDegrees()
                                      for s in srcCat])
                    srcDec = np.array([s.get('coord').getDec().asDegrees()
                                       for s in srcCat])
                    # Simple Box match
                    indMatch = ((srcRa > (ra - sizeDeg)) & (srcRa <
                                                            (ra + sizeDeg)) &
                                (srcDec > (dec - sizeDeg)) & (srcDec <
                                                              (dec + sizeDeg)))
                    # Extract the matched subset
                    srcMatch = srcCat.subset(indMatch)
                    # Save the src catalog to a FITS file
                    outSrc = outPre + '_src.fits'
                    srcMatch.writeFits(outSrc)
                except Exception:
                    print("### Tract: %d  Patch: %s" % (tractId, patchId))
                    warnings.warn("### Can not find the *force catalog !")
                    noSrcFile = prefix + '_nosrc_' + filt + '.lis'
                    # Append mode creates the file if needed; take an
                    # exclusive lock so parallel jobs do not interleave.
                    with open(noSrcFile, "a") as noSrc:
                        try:
                            fcntl.flock(noSrc, fcntl.LOCK_EX)
                            noSrc.write("%d  %s \n" % (tractId, patchId))
                            fcntl.flock(noSrc, fcntl.LOCK_UN)
                        except IOError:
                            pass
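
# A minimal usage sketch, assuming an HSC rerun containing deepCoadd_calexp
# products (the root path and coordinates below are hypothetical, and the
# helpers getCoaddPsfImage/getCoaddBadMsk plus the SEP/WAR strings are
# expected to be defined elsewhere in this module):
#
#   coaddImageCutout('/path/to/hsc_rerun', ra=150.12345, dec=2.20456,
#                    size=100, filt='HSC-I', prefix='hsc_coadd_cutout')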
Example #30
    def setUp(self):
        self.testData = tempfile.mkdtemp(dir=ROOT,
                                         prefix='TestCompositeTestCase-')
        self.firstRepoPath = os.path.join(self.testData, 'repo1')
        self.objA = dpTest.TestObject("abc")
        self.objB = dpTest.TestObject("def")
        self.policy = dafPersist.Policy({
            'camera': 'lsst.afw.cameraGeom.Camera',
            'datasets': {
                'basicObject1': {
                    'python': 'lsst.daf.persistence.test.TestObject',
                    'template': 'basic/id%(id)s.pickle',
                    'storage': 'PickleStorage'
                },
                'basicObject2': {
                    'python': 'lsst.daf.persistence.test.TestObject',
                    'template': 'basic/name%(name)s.pickle',
                    'storage': 'PickleStorage'
                },
                'basicPair': {
                    'python':
                    'lsst.daf.persistence.test.TestObjectPair',
                    'composite': {
                        'a': {
                            'datasetType': 'basicObject1'
                        },
                        'b': {
                            'datasetType': 'basicObject2'
                        }
                    },
                    'assembler':
                    'lsst.daf.persistence.test.TestObjectPair.assembler',
                    'disassembler':
                    'lsst.daf.persistence.test.TestObjectPair.disassembler'
                },
                'stdTestType': {
                    'python': 'lsst.daf.persistence.test.TestObjectPair',
                    'composite': {
                        'a': {
                            'datasetType': 'basicObject1'
                        },
                        'b': {
                            'datasetType': 'basicObject2'
                        }
                    }
                },
                'bypassTestType': {
                    'python': 'lsst.daf.persistence.test.TestObjectPair',
                    'composite': {
                        'a': {
                            'datasetType': 'basicObject1'
                        },
                        'b': {
                            'datasetType': 'basicObject2'
                        }
                    }
                }
            }
        })

        repoArgs = dafPersist.RepositoryArgs(
            root=self.firstRepoPath,
            policy=self.policy,
            mapper='lsst.obs.base.test.CompositeMapper')
        butler = dafPersist.Butler(outputs=repoArgs)
        butler.put(self.objA, 'basicObject1', dataId={'id': 'foo'})
        butler.put(self.objB, 'basicObject2', dataId={'name': 'bar'})
        del butler
        del repoArgs
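
        # A follow-on sketch (not part of the original setUp): the composite
        # defined above could be read back with a butler whose inputs point
        # at the repository just written; the registered assembler is
        # expected to recombine the two components into a TestObjectPair.
        #
        #   butler = dafPersist.Butler(inputs=self.firstRepoPath)
        #   pair = butler.get('basicPair',
        #                     dataId={'id': 'foo', 'name': 'bar'})
        #   # pair should hold objects equal to self.objA and self.objB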