Example #1
def main():
    import argparse
    import os
    import re
    from astrometry.util.fits import fits_table, merge_tables
    from astrometry.libkd.spherematch import match_radec
    from astrometry.util.plotutils import PlotSequence

    parser = argparse.ArgumentParser()
    parser.add_argument("--name1", help="Name for first data set")
    parser.add_argument("--name2", help="Name for second data set")
    parser.add_argument("--plot-prefix", default="compare", help='Prefix for plot filenames; default "%default"')
    parser.add_argument("--match", default=1.0, help="Astrometric cross-match distance in arcsec")
    parser.add_argument("dir1", help="First directory to compare")
    parser.add_argument("dir2", help="Second directory to compare")

    opt = parser.parse_args()

    ps = PlotSequence(opt.plot_prefix)

    name1 = opt.name1
    if name1 is None:
        name1 = os.path.basename(opt.dir1)
        if not len(name1):
            name1 = os.path.basename(os.path.dirname(opt.dir1))
    name2 = opt.name2
    if name2 is None:
        name2 = os.path.basename(opt.dir2)
        if not len(name2):
            name2 = os.path.basename(os.path.dirname(opt.dir2))
    tt = "Comparing %s to %s" % (name1, name2)

    # regex for tractor-*.fits catalog filenames
    catre = re.compile(r"tractor-.*\.fits")

    cat1, cat2 = [], []
    for basedir, cat in [(opt.dir1, cat1), (opt.dir2, cat2)]:
        for dirpath, dirnames, filenames in os.walk(basedir, followlinks=True):
            for fn in filenames:
                if not catre.match(fn):
                    print("Skipping", fn, "due to filename")
                    continue
                fn = os.path.join(dirpath, fn)
                t = fits_table(fn)
                print(len(t), "from", fn)
                cat.append(t)
    cat1 = merge_tables(cat1, columns="fillzero")
    cat2 = merge_tables(cat2, columns="fillzero")
    print("Total of", len(cat1), "from", name1)
    print("Total of", len(cat2), "from", name2)
    cat1.cut(cat1.brick_primary)
    cat2.cut(cat2.brick_primary)
    print("Total of", len(cat1), "BRICK_PRIMARY from", name1)
    print("Total of", len(cat2), "BRICK_PRIMARY from", name2)

    cat1.cut((cat1.decam_anymask[:, 1] == 0) * (cat1.decam_anymask[:, 2] == 0) * (cat1.decam_anymask[:, 4] == 0))
    cat2.cut((cat2.decam_anymask[:, 1] == 0) * (cat2.decam_anymask[:, 2] == 0) * (cat2.decam_anymask[:, 4] == 0))
    print("Total of", len(cat1), "unmasked from", name1)
    print("Total of", len(cat2), "unmasked from", name2)

    I, J, d = match_radec(cat1.ra, cat1.dec, cat2.ra, cat2.dec, opt.match / 3600.0, nearest=True)
    print(len(I), "matched")
    matched1 = cat1[I]
    matched2 = cat2[J]
def decals_dr3():
    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'decam'
    image_basedir = os.path.join(basedir, 'images')

    TT = []

    #zpdir = '/project/projectdirs/cosmo/work/decam/cats/ZeroPoints'
    for fn,dirnms in [
        ('/global/cscratch1/sd/desiproc/zeropoints/decals-zpt-dr3pr1233b.fits',
         ['CP20140810_?_v2',
          'CP20141227', 'CP20150108', 'CP20150326',
          'CP20150407', 'CP20151010', 'CP20151028', 'CP20151126',
          'CP20151226', 'CP20160107', 'CP20160225',
          'COSMOS', 'CPDES82',
          'NonDECaLS/*',
         ]),
        ]:
        T = normalize_zeropoints(fn, dirnms, image_basedir, cam)
        TT.append(T)
    T = merge_tables(TT)
    outfn = 'zp.fits'
    T.writeto(outfn)
    print('Wrote', outfn)

    nd = np.array(['NonDECaLS' in fn for fn in T.image_filename])
    I = np.flatnonzero(nd)
    T[I].writeto('survey-ccds-nondecals.fits')

    I = np.flatnonzero(np.logical_not(nd))
    T[I].writeto('survey-ccds-decals.fits')

    for fn in ['survey-ccds-nondecals.fits', 'survey-ccds-decals.fits']:
        os.system('gzip --best ' + fn)
def decals_dr3_plus():
    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'decam'
    image_basedir = os.path.join(basedir, 'images')

    TT = []

    expnums = [547257, 535154, 535106]
    for fn,dirnms in [
        ('/global/homes/a/arjundey/ZeroPoints/decals-zpt-20160407.fits',
         ['CP20160407',]),
        ('/global/homes/a/arjundey/ZeroPoints/decals-zpt-20160606.fits',
         ['CP20160606',]),
        ]:
        T = fits_table(fn)
        T.cut(np.nonzero([e not in expnums for e in T.expnum])[0])
        normalize_zeropoints(fn, dirnms, image_basedir, cam, T=T)
        TT.append(T)

    dirnms = ['CP20160407','CP20160606']
    for fn in [
        '/global/cscratch1/sd/arjundey/ZeroPoints/zeropoint-c4d_160408_023844_oki_r_v1.fits',
        '/global/cscratch1/sd/arjundey/ZeroPoints/zeropoint-c4d_160408_001343_oki_r_v1.fits',
        '/global/cscratch1/sd/arjundey/ZeroPoints/zeropoint-c4d_160607_023641_oki_g_v1.fits',
        ]:
        T = normalize_zeropoints(fn, dirnms, image_basedir, cam)
        TT.append(T)

    T = merge_tables(TT)
    outfn = 'survey-ccds-dr3plus.fits'
    T.writeto(outfn)
    print('Wrote', outfn)

    for fn in [outfn]:
        os.system('gzip --best ' + fn)
def bok_dr4():
    basedir = os.environ['LEGACY_SURVEY_DIR'] #'/scratch1/scratchdirs/desiproc/DRs/dr4-bootes/legacypipe-dir'
    cam = '90prime'
    image_basedir = os.path.join(basedir, 'images')
    

    #/scratch1/scratchdirs/desiproc/DRs/cp-images/bootes/project/projectdirs/cosmo/staging/bok/BOK_CP/CP20160703
    #/scratch2/scratchdirs/arjundey/ForKaylan/zeropoint-ksb_160704_052458_ooi_g_v1.fits
    TT = []
    for fn,dirnms in [
        ('arjuns-ccds-90prime.fits',
         list(np.loadtxt('bok_cpdirs.txt',dtype=str))),
        #(os.path.join(zpdir, 'r/zeropoint-BOK20150413_g.fits'),
        # [os.path.join(zpdir, 'g')]),
        ]:
        T = normalize_zeropoints(fn, dirnms, image_basedir, cam)
        # fake up the exposure number
        T.expnum = (T.mjd_obs * 100000.).astype(int)
        # compute extension name
        T.ccdname = np.array(['ccd%i' % n for n in T.ccdnum])
        # compute FWHM from Seeing
        pixscale = 0.45
        T.fwhm = T.seeing / pixscale

        T.expid = np.array(['%10i-%s' % (expnum,extname.strip())
                            for expnum,extname in zip(T.expnum, T.ccdname)])

        TT.append(T)
    T = merge_tables(TT)
    outfn = 'survey-ccds-90prime.fits.gz'
    T.writeto(outfn)
    print('Wrote', outfn)
def mzls_dr4(v2=True): 
    if v2:
        zpname='arjuns-ccds-mzls-v2thruMarch19.fits'
        savefn='survey-ccds-mzls-v2thruMarch19.fits.gz'
    else:
        zpname='arjuns-ccds-mzls-v3.fits'
        savefn='survey-ccds-mzls-v3.fits.gz'
    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'mosaic'
    image_basedir = os.path.join(basedir, 'images')
    TT = []

    # mzls_cpdirs_...txt is list of all possible CP directories
    for fn,dirnms in [
        (zpname,
         list(np.loadtxt('mzls_cpdirs_thruMarch19.txt',dtype=str))),
        ]:
        T = fits_table(fn)
        normalize_zeropoints(fn, dirnms, image_basedir, cam, T=T)
        TT.append(T)
    T = merge_tables(TT)

    I = np.flatnonzero(T.fwhm == 0)
    if len(I):
        T.fwhm[I] = T.seeing[I] / 0.262

    T.writeto(savefn)
    print('Wrote', savefn)
def decals_dr2():
    decals_dir = os.environ['DECALS_DIR']
    cam = 'decam'
    image_basedir = os.path.join(decals_dir, 'images')

    TT = []

    #zpdir = '/project/projectdirs/cosmo/work/decam/cats/ZeroPoints'
    for fn,dirnms in [
        ('/scratch1/scratchdirs/desiproc/ZeroPoints/decals-zpt-all-2015oct30.fits',
         ['CP20140810_?_v2', 'CP20141227', 'CP20150108', 'CP20150326',
          'CP20150407', 'NonDECaLS/*','COSMOS', 'CPDES82']),
        # (os.path.join(zpdir, 'decals-zpt-20140810.fits'), ['CP20140810_?_v2']),
        # (os.path.join(zpdir, 'decals-zpt-20141227.fits'), ['CP20141227']),
        # (os.path.join(zpdir, 'decals-zpt-20150108.fits'), ['CP20150108']),
        # (os.path.join(zpdir, 'decals-zpt-20150326.fits'), ['CP20150326']),
        # (os.path.join(zpdir, 'decals-zpt-20150407.fits'), ['CP20150407']),
        # (os.path.join(zpdir, 'decals-zpt-nondecals.fits'), ['NonDECaLS/*','COSMOS', 'CPDES82']),
        ]:
        T = normalize_zeropoints(fn, dirnms, image_basedir, cam)
        TT.append(T)
    T = merge_tables(TT)
    outfn = 'zp.fits'
    T.writeto(outfn)
    print('Wrote', outfn)
Example #7
def stack_tables(fn_list, textfile=True, shuffle=None):
    '''Concatenates FITS tables.

    Args:
        fn_list: text file listing the catalog filenames (if textfile=True),
            otherwise a Python list of filenames
        textfile: whether fn_list is the name of a text file or a list
        shuffle: set to an integer N to shuffle the file list and read at
            most the first N catalogs
    '''
    if shuffle:
        assert (isinstance(shuffle, int))
    if textfile:
        fns = read_lines(fn_list)
    else:
        fns = fn_list
    if len(fns) < 1:
        raise ValueError('Error: empty file list: fns=%s' % fns)
    if shuffle:
        print('shuffling, reading at most %d' % shuffle)
        seed = 7
        np.random.seed(seed)
        inds = np.arange(len(fns))
        np.random.shuffle(inds)
        fns = [fns[i] for i in inds]
    cats = []
    for i, fn in enumerate(fns):
        print('reading %s %d/%d' % (fn, i + 1, len(fns)))
        if shuffle and i >= shuffle:
            print('shuffle limit of %d reached, stopping read' % shuffle)
            break
        try:
            tab = fits_table(fn)
            cats.append(tab)
        except IOError:
            print('Fits file does not exist: %s' % fn)
    return merge_tables(cats, columns='fillzero')
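# Hedged usage sketch (not from the original source): the filenames below are
# placeholders, and stack_tables assumes read_lines, fits_table and
# merge_tables (astrometry.util.fits) are importable in this module.
def example_stack_tables():
    # Read the catalogs listed in a text file, shuffled, keeping at most 100.
    stacked = stack_tables('tractor_cats.txt', textfile=True, shuffle=100)
    stacked.writeto('stacked-shuffled.fits')
    # Or pass an explicit list of paths instead of a text file.
    return stack_tables(['cat1.fits', 'cat2.fits'], textfile=False)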
def mzls_to_20160315():
    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'mosaic'
    image_basedir = os.path.join(basedir, 'images')
    TT = []

    for fn,dirnms in [
        ('/global/homes/a/arjundey/ZeroPoints/mzls-zpt-all.fits',
         ['CP20160202','CP20160203','CP20160204','CP20160205','CP20160206','CP20160208',
          'CP20160209','CP20160210','CP20160211','CP20160212','CP20160213','CP20160214',
          'CP20160215','CP20160216','CP20160217','CP20160219','CP20160224','CP20160225',
          'CP20160226','CP20160227','CP20160228','CP20160229','CP20160301','CP20160302',
          'CP20160303','CP20160304','CP20160305','CP20160306','CP20160308','CP20160309',
          'CP20160310','CP20160311','CP20160312','CP20160313','CP20160314','CP20160315',
          'CP20160316','CP20160317','CP20160318','CP20160319','CP20160320','CP20160325',
          'CP20160326','CP20160327','CP20160328','CP20160330','CP20160331','CP20160401',
          'CP20160402','CP20160403','CP20160404','CP20160408',]),
        ]:
        T = fits_table(fn)
        normalize_zeropoints(fn, dirnms, image_basedir, cam, T=T)
        TT.append(T)
    T = merge_tables(TT)

    I = np.flatnonzero(T.fwhm == 0)
    if len(I):
        T.fwhm[I] = T.seeing[I] / 0.262

    outfn = 'survey-ccds-mzls-to-20160315.fits'
    T.writeto(outfn)
    print('Wrote', outfn)

    for fn in [outfn]:
        os.system('gzip --best ' + fn)
Example #9
 def get_healpix_catalogs(self, healpixes):
     from astrometry.util.fits import merge_tables
     cats = []
     for hp in healpixes:
         cats.append(self.get_healpix_catalog(hp))
     if len(cats) == 1:
         return cats[0]
     return merge_tables(cats)
Example #10
def cat_sdss(req, ver):
    import json
    import numpy as np
    from astrometry.util.starutil_numpy import degrees_between, radectoxyz, xyztoradec
    from map.views import sdss_ccds_near
    from astrometry.util.fits import fits_table, merge_tables

    tag = 'sdss-cat'
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])

    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))

    rad = degrees_between(ralo, declo, rahi, dechi) / 2.
    xyz1 = radectoxyz(ralo, declo)
    xyz2 = radectoxyz(rahi, dechi)
    xyz = (xyz1 + xyz2)
    xyz /= np.sqrt(np.sum(xyz**2))
    rc,dc = xyztoradec(xyz)
    rad = rad + np.hypot(10.,14.)/2./60.
    ccds = sdss_ccds_near(rc[0], dc[0], rad)
    if ccds is None:
        print('No SDSS CCDs nearby')
        return HttpResponse(json.dumps(dict(rd=[])),
                            content_type='application/json')
    print(len(ccds), 'SDSS CCDs')

    T = []
    for ccd in ccds:
        # env/BOSS_PHOTOOBJ/301/2073/3/photoObj-002073-3-0088.fits
        fn = os.path.join(settings.SDSS_BASEDIR, 'env', 'BOSS_PHOTOOBJ',
                          str(ccd.rerun), str(ccd.run), str(ccd.camcol),
                          'photoObj-%06i-%i-%04i.fits' % (ccd.run, ccd.camcol, ccd.field))
        print('Reading', fn)
        T.append(fits_table(fn, columns='ra dec objid mode objc_type objc_flags nchild tai expflux devflux psfflux cmodelflux fracdev mjd'.split()))
    T = merge_tables(T)
    T.cut((T.dec >= declo) * (T.dec <= dechi))
    # FIXME
    T.cut((T.ra  >= ralo) * (T.ra <= rahi))
    
    # primary
    T.cut(T.mode == 1)
    types = ['P' if t == 6 else 'C' for t in T.objc_type]
    fluxes = [p if t == 6 else c for t,p,c in zip(T.objc_type, T.psfflux, T.cmodelflux)]

    return HttpResponse(json.dumps(dict(
        rd=[(float(o.ra),float(o.dec)) for o in T],
        sourcetype=types,
        fluxes = [dict(u=float(f[0]), g=float(f[1]), r=float(f[2]),
                       i=float(f[3]), z=float(f[4])) for f in fluxes],
    )),
                        content_type='application/json')
 def get_healpix_rangesearch_catalogs(self, healpixes, rc, dc, rad):
     cats = []
     for hp in healpixes:
         (kd, tab) = self.get_healpix_tree(hp)
         I = tree_search_radec(kd, rc, dc, rad)
         if len(I):
             cats.append(tab[I])
     if len(cats) == 1:
         return cats[0]  #.copy()
     return merge_tables(cats)
def decals_dr3_extra():
    # /global/homes/a/arjundey/ZeroPoints/decals-zpt-dr3-all.fits
    T = fits_table('/global/cscratch1/sd/desiproc/zeropoints/decals-zpt-dr3-all.fits')

    S1 = fits_table('survey-ccds-nondecals.fits.gz')
    S2 = fits_table('survey-ccds-decals.fits.gz')
    S = merge_tables([S1,S2])

    gotchips = set(zip(S.expnum, S.ccdname))

    got = np.array([(e,c) in gotchips for e,c in zip(T.expnum, T.ccdname)])
    print('Found', sum(got), 'of', len(T), 'dr3-all CCDs in existing surveys tables of size', len(S))

    I = np.flatnonzero(np.logical_not(got))
    T.cut(I)
    print(len(T), 'remaining')
    #print('Directories:', np.unique([os.path.basename(os.path.dirname(fn.strip())) for fn in T.filename]))
    print('Filenames:', np.unique(T.filename))

    T.writeto('extras.fits')

    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'decam'
    image_basedir = os.path.join(basedir, 'images')
    TT = []

    for fn,dirnms in [
        ('extras.fits',
         ['CP20140810_?_v2',
          'CP20141227', 'CP20150108', 'CP20150326',
          'CP20150407', 'CP20151010', 'CP20151028', 'CP20151126',
          'CP20151226', 'CP20160107', 'CP20160225',
          'COSMOS', 'CPDES82',
          'NonDECaLS/*',
         ]),
        ]:
        T = normalize_zeropoints(fn, dirnms, image_basedir, cam)
        TT.append(T)
    T = merge_tables(TT)
    outfn = 'survey-ccds-extra.fits'
    T.writeto(outfn)
    print('Wrote', outfn)
Example #13
 def run(self, bricks_to_merge):
     Tlist = []
     for brick in bricks_to_merge:
         fn = self.table_fn(brick)
         if os.path.exists(fn):
             tab = fits_table(self.table_fn(brick))
             Tlist.append(tab)
         else:
             print("Skipping brick %s because randoms table doesn't exist" % fn)
     T = merge_tables(Tlist, columns='fillzero')
     T.writeto(self.savefn)
     print('Wrote %s' % self.savefn)
Example #14
def merge_splinesky(survey, expnum, C, skyoutfn, opt):
    skies = []
    imobjs = []
    Cgood = []
    for ccd in C:
        im = survey.get_image_object(ccd)
        fn = im.skyfn
        if not os.path.exists(fn):
            print('File not found:', fn)
            if opt.all_found:
                return 0
            continue
        imobjs.append(im)
        Cgood.append(ccd)

    for ccd, im in zip(Cgood, imobjs):
        fn = im.skyfn
        print('Reading', fn)
        T = None
        try:
            T = fits_table(fn)
        except KeyboardInterrupt as e:
            raise e
        except:
            print('Failed to read file', fn, ':', sys.exc_info()[1])
        if T is not None:
            skies.append(T)

    if len(skies) == 0:
        return
    T = fits_table()
    T.gridw = np.array([t.gridvals[0].shape[1] for t in skies])
    T.gridh = np.array([t.gridvals[0].shape[0] for t in skies])
    padded = pad_arrays([t.gridvals[0] for t in skies])
    T.gridvals = np.concatenate([[p] for p in padded])
    padded = pad_arrays([t.xgrid[0] for t in skies])
    T.xgrid = np.concatenate([[p] for p in padded])
    padded = pad_arrays([t.ygrid[0] for t in skies])
    T.ygrid = np.concatenate([[p] for p in padded])

    cols = skies[0].columns()
    for c in ['gridvals', 'xgrid', 'ygrid', 'gridw', 'gridh']:
        cols.remove(c)

    T.add_columns_from(merge_tables(skies, columns=cols))
    fn = skyoutfn
    trymakedirs(fn, dir=True)
    tmpfn = os.path.join(os.path.dirname(fn), 'tmp-' + os.path.basename(fn))
    T.writeto(tmpfn)
    os.rename(tmpfn, fn)
    print('Wrote', fn)
    return 1
Example #15
def cat_vcc(req, ver):
    import json
    tag = 'ngc'
    ralo = float(req.GET['ralo'])
    rahi = float(req.GET['rahi'])
    declo = float(req.GET['declo'])
    dechi = float(req.GET['dechi'])

    ver = int(ver)
    if not ver in catversions[tag]:
        raise RuntimeError('Invalid version %i for tag %s' % (ver, tag))

    from astrometry.util.fits import fits_table, merge_tables
    import numpy as np
    from decals import settings

    TT = []
    T = fits_table(os.path.join(settings.DATA_DIR, 'virgo-cluster-cat-2.fits'))
    print(len(T), 'in VCC 2; ra', ralo, rahi, 'dec', declo, dechi)
    T.cut((T.ra > ralo) * (T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    print(len(T), 'in cut')
    TT.append(T)

    T = fits_table(os.path.join(settings.DATA_DIR, 'virgo-cluster-cat-3.fits'))
    print(len(T), 'in VCC 3; ra', ralo, rahi, 'dec', declo, dechi)
    T.cut((T.ra > ralo) * (T.ra < rahi) * (T.dec > declo) * (T.dec < dechi))
    print(len(T), 'in cut')
    T.evcc_id = np.array(['-']*len(T))
    T.rename('id', 'vcc_id')
    TT.append(T)
    T = merge_tables(TT)

    rd = list((float(r),float(d)) for r,d in zip(T.ra, T.dec))
    names = []

    for t in T:
        evcc = t.evcc_id.strip()
        vcc = t.vcc_id.strip()
        ngc = t.ngc.strip()
        nms = []
        if evcc != '-':
            nms.append('EVCC ' + evcc)
        if vcc != '-':
            nms.append('VCC ' + vcc)
        if ngc != '-':
            nms.append('NGC ' + ngc)
        names.append(' / '.join(nms))

    return HttpResponse(json.dumps(dict(rd=rd, name=names)),
                        content_type='application/json')
Example #17
    def __init__(self, imagedir, nom, opt):
        super(NgcBot, self).__init__(imagedir, backlog=False)
        self.nom = nom
        self.opt = opt

        # Read catalogs
        TT = []
        import astrometry
        from collections import Counter
        catdir = os.path.join(os.path.dirname(astrometry.__file__), 'catalogs')
        fn = os.path.join(catdir, 'ngc2000.fits')
        print('Reading', fn)
        T = fits_table(fn)
        T.delete_column('ngcnum')
        TT.append(T)

        fn = os.path.join(catdir, 'ic2000.fits')
        print('Reading', fn)
        T = fits_table(fn)
        T.delete_column('icnum')
        TT.append(T)
        self.cat = merge_tables(TT, columns='fillzero')
        del TT

        print('Total of', len(self.cat), 'NGC/IC objects')
        omit = [
            'OC',  # open cluster
            'Ast',  # asterism
            '***',  # triple star
            'D*',  # double star
            '*',  # star
            '-',  # called nonexistent in RNGC
            'PD'  # plate defect
        ]
        # '?', # uncertain type or may not exist
        # '', # unidentified or type unknown
        print(Counter(self.cat.classification))

        self.cat.cut(
            np.flatnonzero(
                np.array(
                    [t.strip() not in omit for t in self.cat.classification])))
        print('Cut to', len(self.cat), 'NGC/IC objects')

        print('Remaining classifications:', np.unique(self.cat.classification))

        self.spec = fits_table('specObj-dr12-trim-2.fits')

        self.cached_flats = {}
        self.read_flats = opt.flats
Example #18
    def __init__(self, ccds_used_wildcard=None):
        '''plot ra,dec of ccds used (the footprint for this run)

        Args:
            ccds_used_wildcard: path to ccds file or path containing an asterisk
        '''
        if "*" in ccds_used_wildcard:
            fns = glob(ccds_used_wildcard)
            assert (len(fns) > 0)
            ccds = [
                fits_table(fn, columns=['ra', 'dec', 'expid']) for fn in fns
            ]
            self.ccds = merge_tables(ccds)
            del ccds
        else:
            self.ccds = fits_table(ccds_used_wildcard)
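# Standalone sketch of the wildcard branch above (not from the original
# source); the pattern is a placeholder and fits_table/merge_tables come from
# astrometry.util.fits as elsewhere in these examples.
def example_merge_ccds_used(pattern='ccds-used-*.fits'):
    from glob import glob
    from astrometry.util.fits import fits_table, merge_tables
    fns = glob(pattern)
    assert len(fns) > 0
    # Read only the columns needed for a footprint plot, then merge.
    return merge_tables([fits_table(fn, columns=['ra', 'dec', 'expid'])
                         for fn in fns])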
def decals_dr3_fix392400():
    T = fits_table('survey-ccds-decals.fits.gz')
    print('Started with', len(T), 'CCDs')
    T.cut(T.expnum != 392400)
    print('Removed bad CCDs:', len(T))

    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'decam'
    image_basedir = os.path.join(basedir, 'images')
    fn = '/global/cscratch1/sd/desiproc/zeropoints/zeropoint-c4d_141228_060426_ooi_g_v1.fits'
    dirnms = ['CP20141227']
    Tnew = normalize_zeropoints(fn, dirnms, image_basedir, cam)
    print('Replacement CCDs:', len(Tnew))
    T = merge_tables([T, Tnew])
    print('Merged:', len(T))
    T.writeto('new-survey-ccds-decals.fits.gz')
Example #20
 def vipers(self):
     # Data
     w1 = fits_table(os.path.join(self.truth_dir, 'vipers-w1.fits.gz'))
     w4 = fits_table(os.path.join(self.truth_dir, 'vipers-w4.fits.gz'))
     # Bricks
     for data in [w1, w4]:
         data.set('ra', data.get('alpha'))
         data.set('dec', data.get('delta'))
     bnames = {}
     for data, key in zip([w1, w4], ['w1', 'w4']):
         bnames[key]= self.bricks_in_region(rlo=data.get('ra').min(), rhi=data.get('ra').max(),\
                                           dlo=data.get('dec').min(),dhi=data.get('dec').max())
     bricks = np.array([])
     for key in bnames.keys():
         bricks = np.concatenate((bricks, bnames[key]))
     # Tractor Catalogues --> file list
     catlist = os.path.join(self.save_dir, 'vipers_dr3_bricks.txt')
     if not os.path.exists(catlist):
         fout = open(catlist, 'w')
         for b in bricks:
             fn = os.path.join(self.dr3_dir,
                               'tractor/%s/tractor-%s.fits' % (b[:3], b))
             fout.write('%s\n' % fn)
         fout.close()
         print('Wrote %s' % catlist)
     # Merge w1,w4 for matching
     vipers = []
     for fn in [os.path.join(self.truth_dir,'vipers-w1.fits.gz'),\
                os.path.join(self.truth_dir,'vipers-w4.fits.gz')]:
         vipers.append(fits_table(fn))
     vipers = merge_tables(vipers, columns='fillzero')
     # use the merged table's own alpha/delta columns
     vipers.set('ra', vipers.get('alpha'))
     vipers.set('dec', vipers.get('delta'))
     # Match
     fits_funcs = CatalogueFuncs()
     dr3 = fits_funcs.stack(
         os.path.join(self.save_dir, 'vipers_dr3_bricks.txt'))
     mat = Matcher()
     imatch, imiss, d2d = mat.match_within(vipers, dr3)  #,dist=1./3600)
     vipers.cut(imatch['ref'])
     dr3.cut(imatch['obs'])
     # Save
     vipers.writeto(
         os.path.join(self.save_dir, 'vipersw1w4-dr3matched.fits'))
     dr3.writeto(os.path.join(self.save_dir, 'dr3-vipersw1w4matched.fits'))
     print('Wrote %s\nWrote %s' % (os.path.join(self.save_dir,'vipersw1w4-dr3matched.fits'),\
                                   os.path.join(self.save_dir,'dr3-vipersw1w4matched.fits')))
Example #21
    def get_cat(self,ra,dec):
        """Read the healpixed PS1 catalogs given input ra,dec coordinates."""
        from astrometry.util.fits import fits_table, merge_tables
        from astrometry.util.util import radecdegtohealpix, healpix_xy_to_ring
        ipring = np.empty(len(ra)).astype(int)
        for iobj, (ra1, dec1) in enumerate(zip(ra,dec)):
            hpxy = radecdegtohealpix(ra1,dec1,self.nside)
            ipring[iobj] = healpix_xy_to_ring(hpxy,self.nside)
        pix = np.unique(ipring)

        cat = list()
        for ipix in pix:
            fname = os.path.join(self.ps1dir,'ps1-'+'{:05d}'.format(ipix)+'.fits')
            print('Reading {}'.format(fname))
            cat.append(fits_table(fname))
        cat = merge_tables(cat)
        return cat
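# Standalone sketch of the healpix lookup used in get_cat above; the nside
# value and coordinates are illustrative only, and the helpers are the same
# astrometry.util.util functions the method imports.
def example_ps1_healpix_pixels():
    import numpy as np
    from astrometry.util.util import radecdegtohealpix, healpix_xy_to_ring
    nside = 32
    ra = np.array([150.1, 150.2])
    dec = np.array([2.2, 2.3])
    # Convert each RA,Dec to an xy-ordered healpix index, then to ring ordering.
    ring = [healpix_xy_to_ring(radecdegtohealpix(r, d, nside), nside)
            for r, d in zip(ra, dec)]
    # The unique ring pixels determine which ps1-NNNNN.fits files to read.
    return np.unique(ring)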
Example #23
def brick_catalog_for_radec_box(ralo, rahi, declo, dechi,
                                decals, catpattern, bricks=None):
    '''
    Merges multiple Tractor brick catalogs to cover an RA,Dec
    bounding-box.

    No cleverness with RA wrap-around; assumes ralo < rahi.

    decals: Decals object
    
    bricks: table of bricks, eg from Decals.get_bricks()

    catpattern: filename pattern of catalog files to read,
        eg "pipebrick-cats/tractor-phot-%06i.its"
    
    '''
    assert(ralo < rahi)
    assert(declo < dechi)

    if bricks is None:
        bricks = decals.get_bricks_readonly()
    I = decals.bricks_touching_radec_box(bricks, ralo, rahi, declo, dechi)
    print(len(I), 'bricks touch RA,Dec box')
    TT = []
    hdr = None
    for i in I:
        brick = bricks[i]
        fn = catpattern % brick.brickid
        print('Catalog', fn)
        if not os.path.exists(fn):
            print('Warning: catalog does not exist:', fn)
            continue
        T = fits_table(fn, header=True)
        if T is None or len(T) == 0:
            print('Warning: empty catalog', fn)
            continue
        T.cut((T.ra  >= ralo ) * (T.ra  <= rahi) *
              (T.dec >= declo) * (T.dec <= dechi))
        TT.append(T)
    if len(TT) == 0:
        return None
    T = merge_tables(TT)
    # arbitrarily keep the first header
    T._header = TT[0]._header
    return T
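# Hedged usage sketch: the Decals() construction, RA,Dec box and catalog
# filename pattern are illustrative placeholders, not from the original source.
def example_brick_catalog_box():
    decals = Decals()
    cat = brick_catalog_for_radec_box(240.0, 240.5, 5.0, 5.5, decals,
                                      'pipebrick-cats/tractor-phot-%06i.fits')
    if cat is not None:
        print(len(cat), 'sources in RA,Dec box')
    return cat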
Example #24
def brick_catalog_for_radec_box(ralo, rahi, declo, dechi,
                                survey, catpattern, bricks=None):
    '''
    Merges multiple Tractor brick catalogs to cover an RA,Dec
    bounding-box.

    No cleverness with RA wrap-around; assumes ralo < rahi.

    survey: LegacySurveyData object
    
    bricks: table of bricks, eg from LegacySurveyData.get_bricks()

    catpattern: filename pattern of catalog files to read,
        eg "pipebrick-cats/tractor-phot-%06i.its"
    
    '''
    assert(ralo < rahi)
    assert(declo < dechi)

    if bricks is None:
        bricks = survey.get_bricks_readonly()
    I = survey.bricks_touching_radec_box(bricks, ralo, rahi, declo, dechi)
    print(len(I), 'bricks touch RA,Dec box')
    TT = []
    hdr = None
    for i in I:
        brick = bricks[i]
        fn = catpattern % brick.brickid
        print('Catalog', fn)
        if not os.path.exists(fn):
            print('Warning: catalog does not exist:', fn)
            continue
        T = fits_table(fn, header=True)
        if T is None or len(T) == 0:
            print('Warning: empty catalog', fn)
            continue
        T.cut((T.ra  >= ralo ) * (T.ra  <= rahi) *
              (T.dec >= declo) * (T.dec <= dechi))
        TT.append(T)
    if len(TT) == 0:
        return None
    T = merge_tables(TT)
    # arbitrarily keep the first header
    T._header = TT[0]._header
    return T
Example #25
def merge_psfex(survey, expnum, C, psfoutfn, opt):
    psfex = []
    imobjs = []
    Cgood = []
    fns = []
    for ccd in C:
        im = survey.get_image_object(ccd)
        for fn in [im.psffn, im.old_single_psffn]:
            if os.path.exists(fn):
                break
        if not os.path.exists(fn):
            print('File not found:', fn)
            if opt.all_found:
                return 0
            continue
        imobjs.append(im)
        Cgood.append(ccd)
        fns.append(fn)

    for fn, ccd, im in zip(fns, Cgood, imobjs):
        print('Reading', fn)
        T = fits_table(fn)

        cols = T.get_columns()
        if not 'plver' in cols:
            from legacypipe.image import psfex_single_to_merged
            T = psfex_single_to_merged(fn, ccd.expnum, ccd.ccdname)
            for k in ['plver', 'procdate', 'plprocid']:
                T.set(k, np.array([getattr(ccd, k)]))
        psfex.append(T)

    if len(psfex) == 0:
        return
    padded = pad_arrays([p.psf_mask[0] for p in psfex])
    cols = psfex[0].columns()
    cols.remove('psf_mask')
    T = merge_tables(psfex, columns=cols)
    T.psf_mask = np.concatenate([[p] for p in padded])
    fn = psfoutfn
    trymakedirs(fn, dir=True)
    tmpfn = os.path.join(os.path.dirname(fn), 'tmp-' + os.path.basename(fn))
    T.writeto(tmpfn)
    os.rename(tmpfn, fn)
    print('Wrote', fn)
    return 1
def decals_run19():
    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'decam'
    image_basedir = os.path.join(basedir, 'images')
    TT = []
    for fn,dirnms in [
        ('/global/homes/a/arjundey/ZeroPoints/decals-zpt-20160801_20161011.fits',
         ['CP20160801', 'CP20161011']),
        ]:
        T = fits_table(fn)
        normalize_zeropoints(fn, dirnms, image_basedir, cam, T=T)
        TT.append(T)
    T = merge_tables(TT)
    outfn = 'survey-ccds-run19.fits'
    T.writeto(outfn)
    print('Wrote', outfn)
    for fn in [outfn]:
        os.system('gzip --best ' + fn)
def decals_dr3_dedup():
    SN = fits_table('survey-ccds-nondecals.fits.gz')
    SD = fits_table('survey-ccds-decals.fits.gz')
    sne = np.unique(SN.expnum)
    sde = np.unique(SD.expnum)
    isec = set(sne).intersection(sde)
    print(len(isec), 'exposures in common between "decals" and "nondecals"')
    # These are two versions of CP reductions in CPDES82 and CPHETDEX.
    I = np.flatnonzero(np.array([e in isec for e in SD.expnum]))
    print(len(I), 'rows in "decals"')

    I = np.flatnonzero(np.array([e not in isec for e in SD.expnum]))
    SD.cut(I)
    print(len(SD), 'rows remaining')

    # Now, also move "decals" filenames containing "CPDES82" to "nondecals".
    I = np.flatnonzero(np.array(['CPDES82' in fn for fn in SD.image_filename]))
    keep = np.ones(len(SD), bool)
    keep[I] = False

    print('Merging:')
    SN.about()
    SD[I].about()
    
    SN2 = merge_tables((SN, SD[I]), columns='fillzero')
    SD2 = SD[keep]

    print('Moved CPDES82: now', len(SN2), 'non-decals and', len(SD2), 'decals')
    
    SN2.writeto('survey-ccds-nondecals.fits')
    SD2.writeto('survey-ccds-decals.fits')

    SE = fits_table('survey-ccds-extra.fits.gz')
    SN = fits_table('survey-ccds-nondecals.fits')
    SD = fits_table('survey-ccds-decals.fits')

    sne = np.unique(SN.expnum)
    sde = np.unique(SD.expnum)
    see = np.unique(SE.expnum)

    i1 = set(sne).intersection(sde)
    i2 = set(sne).intersection(see)
    i3 = set(sde).intersection(see)
    print('Intersections:', len(i1), len(i2), len(i3))
Example #28
 def deep2(self):
     # Data
     deep2 = {}
     for key in ['1', '2', '3', '4']:
         deep2[key] = fits_table(
             '/project/projectdirs/desi/target/analysis/truth/deep2-field%s.fits.gz'
             % key)
     # Bricks
     bnames = {}
     for key in deep2.keys():
         bnames[key]= self.bricks_in_region(rlo=deep2[key].get('ra').min(), rhi=deep2[key].get('ra').max(),\
                                           dlo=deep2[key].get('dec').min(),dhi=deep2[key].get('dec').max())
         print('Field=%s, Num Bricks=%d, Bricks:' % (key, len(bnames[key])),
               bnames[key])
     bricks = np.array([])
     for key in bnames.keys():
         bricks = np.concatenate((bricks, bnames[key]))
     # Tractor Catalogues --> file list
     catlist = os.path.join(self.save_dir, 'deep2_dr3_bricks.txt')
     if not os.path.exists(catlist):
         fout = open(catlist, 'w')
         for b in bricks:
             fn = os.path.join(self.dr3_dir,
                               'tractor/%s/tractor-%s.fits' % (b[:3], b))
             fout.write('%s\n' % fn)
         fout.close()
         print('Wrote %s' % catlist)
     # Merge for matching
     dp2 = [deep2['2'], deep2['3'], deep2['4']]
     dp2 = merge_tables(dp2, columns='fillzero')
     # Match
     fits_funcs = CatalogueFuncs()
     dr3 = fits_funcs.stack(
         os.path.join(self.save_dir, 'deep2_dr3_bricks.txt'))
     mat = Matcher()
     imatch, imiss, d2d = mat.match_within(dp2, dr3)  #,dist=1./3600)
     dp2.cut(imatch['ref'])
     dr3.cut(imatch['obs'])
     fits_funcs.set_extra_data(dr3)
     # Save
     dp2.writeto(os.path.join(self.save_dir, 'deep2f234-dr3matched.fits'))
     dr3.writeto(os.path.join(self.save_dir, 'dr3-deep2f234matched.fits'))
     print('Wrote %s\nWrote %s' % (os.path.join(self.save_dir,'deep2f234-dr3matched.fits'),\
                                   os.path.join(self.save_dir,'dr3-deep2f234matched.fits')))
Example #29
    def get_ccds(self):
        '''
        Returns the table of CCDs.
        '''
        from glob import glob

        fns = self.find_file('ccds')
        fns.sort()
        fns = self.filter_ccds_files(fns)
        TT = []
        for fn in fns:
            print('Reading CCDs from', fn)
            # cols = (
            #     'exptime filter propid crpix1 crpix2 crval1 crval2 ' +
            #     'cd1_1 cd1_2 cd2_1 cd2_2 ccdname ccdzpt ccdraoff ccddecoff ' +
            #     'ccdnmatch camera image_hdu image_filename width height ' +
            #     'ra dec zpt expnum fwhm mjd_obs').split()
            #T = fits_table(fn, columns=cols)
            T = fits_table(fn)
            print('Got', len(T), 'CCDs')
            TT.append(T)
        T = merge_tables(TT, columns='fillzero')
        print('Total of', len(T), 'CCDs')
        del TT

        cols = T.columns()
        # Make DR1 CCDs table somewhat compatible with DR2
        if 'extname' in cols and not 'ccdname' in cols:
            T.ccdname = T.extname
        if not 'camera' in cols:
            T.camera = np.array(['decam'] * len(T))
        if 'cpimage' in cols and not 'image_filename' in cols:
            T.image_filename = T.cpimage
        if 'cpimage_hdu' in cols and not 'image_hdu' in cols:
            T.image_hdu = T.cpimage_hdu

        # Remove trailing spaces from 'ccdname' column
        if 'ccdname' in T.columns():
            # "N4 " -> "N4"
            T.ccdname = np.array([s.strip() for s in T.ccdname])
        # Remove trailing spaces from 'camera' column.
        T.camera = np.array([c.strip() for c in T.camera])
        return T
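# Hedged usage sketch: get_ccds appears to be a method of a survey object
# (e.g. legacypipe's LegacySurveyData); the import path and survey_dir below
# are assumptions, not from the original source.
def example_get_ccds():
    import numpy as np
    from legacypipe.survey import LegacySurveyData
    survey = LegacySurveyData(survey_dir='/path/to/survey-dir')
    ccds = survey.get_ccds()
    print(len(ccds), 'CCDs,', len(np.unique(ccds.expnum)), 'exposures')
    return ccds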
Example #30
def read_chunks(ra, dec):

    chunkdir = os.environ['PS1CHUNKS']
    phi = np.radians(ra)
    theta = np.radians(90 - dec)
    pixels = healpy.pixelfunc.ang2pix(32, theta, phi)
    pixels = np.unique(pixels)
    cat = []
    for pix in pixels:
        fn = chunkdir + 'ps1-%05d.fits' % pix
        try:
            cat.append(fits_table(fn, ext=1))
        except ValueError:
            continue
        except IOError:
            continue
    if not cat:
        raise NoCalibrationStars
    return merge_tables(cat)
Example #31
def merge_bybrick(bricks, outdir='', prefix='', cleanup=False):
    for brick in bricks:
        outfn = get_brick_merged_fn(brickname=brick,
                                    outdir=outdir,
                                    prefix=prefix)
        if cleanup:
            if os.path.exists(outfn):
                # Safe to remove the pieces that went into outfn
                rm_fns = get_brick_sample_fns(brickname=brick,
                                              outdir=outdir,
                                              prefix=prefix)
                if not rm_fns is None:
                    print('removing files like: %s' % rm_fns[0])
                    try:
                        for rm_fn in rm_fns:
                            os.remove(rm_fn)
                    except OSError:
                        pass
            continue
        elif os.path.exists(outfn):
            # outfn already exists and cleanup is off, so nothing to do
            continue
        else:
            print('outfn=%s' % outfn)
            fns = get_brick_sample_fns(brickname=brick,
                                       outdir=outdir,
                                       prefix=prefix)
            if fns is None:
                # Wildcard found nothing see outdir/nofns_wildcard.txt
                continue
            cats = []
            for i, fn in enumerate(fns):
                print('reading %d/%d' % (i + 1, len(fns)))
                try:
                    tab = fits_table(fn)
                    cats.append(tab)
                except IOError:
                    print('Fits file does not exist: %s' % fn)
            cat = merge_tables(cats, columns='fillzero')
            cat.writeto(outfn)
            print('Wrote %s' % outfn)
def gather_arjuns_zpts(cpimage_list='bootes-90prime-abspath.txt',
                       name='arjuns-ccds-90prime.fits'):
    zpdir='/scratch2/scratchdirs/arjundey/ForKaylan'
    # Get list of cpimages want zeropoints files for
    cps=np.loadtxt(cpimage_list,dtype=str)
    cats=[]
    for i,cp in enumerate(cps):
        print('reading %d/%d' % (i+1,len(cps)))
        if '90prime' in name:
            zpname= os.path.basename(cp).replace('ksb','zeropoint-ksb').replace('.fits.fz','.fits')
        elif 'mzls' in name:
            zpname= os.path.basename(cp).replace('k4m','zeropoint-k4m').replace('.fits.fz','.fits')
        else:
            raise ValueError('%s' % name)
        zpname=os.path.join(zpdir,zpname)
        try:
            cats.append( fits_table(zpname) )
        except IOError:
            print('WARNING: Cannot read this file: %s' % zpname)
    bigcat=merge_tables(cats, columns='fillzero')
    bigcat.writeto(name)
def decals_run16():
    basedir = os.environ['LEGACY_SURVEY_DIR']
    cam = 'decam'
    image_basedir = os.path.join(basedir, 'images')
    TT = []

    for fn,dirnms in [
        ('/global/homes/a/arjundey/ZeroPoints/decals-zpt-20160709_20.fits',
         ['CP20160709', 'CP20160720']),
        ]:
        T = fits_table(fn)
        #T.cut(np.nonzero([e not in expnums for e in T.expnum])[0])
        normalize_zeropoints(fn, dirnms, image_basedir, cam, T=T)
        TT.append(T)
    T = merge_tables(TT)
    outfn = 'survey-ccds-run16.fits'
    T.writeto(outfn)
    print('Wrote', outfn)

    for fn in [outfn]:
        os.system('gzip --best ' + fn)
Example #34
def _get_decals_cat(wcs, tag='decals'):
    from astrometry.util.fits import fits_table, merge_tables
    from map.views import get_survey

    basedir = settings.DATA_DIR
    H, W = wcs.shape
    X = wcs.pixelxy2radec([1, 1, 1, W / 2, W, W, W, W / 2],
                          [1, H / 2, H, H, H, H / 2, 1, 1])
    r, d = X[-2:]

    #catpat = os.path.join(basedir, 'cats', tag, '%(brickname).3s',
    #                      'tractor-%(brickname)s.fits')

    survey = get_survey(tag)
    B = survey.get_bricks_readonly()
    I = survey.bricks_touching_radec_box(B, r.min(), r.max(), d.min(), d.max())
    #print(len(I), 'bricks touching RA,Dec box', r.min(),r.max(), d.min(),d.max())

    cat = []
    hdr = None
    for brickname in B.brickname[I]:
        catfn = survey.find_file('tractor', brick=brickname)
        if not os.path.exists(catfn):
            print('Does not exist:', catfn)
            continue
        debug('Reading catalog', catfn)
        T = fits_table(catfn)
        T.cut(T.brick_primary)
        print('File', catfn, 'cut to', len(T), 'primary')
        ok, xx, yy = wcs.radec2pixelxy(T.ra, T.dec)
        T.cut((xx > 0) * (yy > 0) * (xx < W) * (yy < H))
        cat.append(T)
        if hdr is None:
            hdr = T.get_header()
    if len(cat) == 0:
        cat = None
    else:
        cat = merge_tables(cat, columns='fillzero')

    return cat, hdr
Example #35
def _get_decals_cat(wcs, tag='decals'):
    from astrometry.util.fits import fits_table, merge_tables
    from map.views import get_survey

    H,W = wcs.shape
    X = wcs.pixelxy2radec([1,1,1,W/2,W,W,W,W/2],
                            [1,H/2,H,H,H,H/2,1,1])
    r,d = X[-2:]

    #catpat = os.path.join(basedir, 'cats', tag, '%(brickname).3s',
    #                      'tractor-%(brickname)s.fits')

    survey = get_survey(tag)
    B = survey.get_bricks_readonly()
    I = survey.bricks_touching_radec_box(B, r.min(), r.max(), d.min(), d.max())
    #print(len(I), 'bricks touching RA,Dec box', r.min(),r.max(), d.min(),d.max())

    cat = []
    hdr = None
    for brickname in B.brickname[I]:
        catfn = survey.find_file('tractor', brick=brickname)
        if not os.path.exists(catfn):
            print('Does not exist:', catfn)
            continue
        debug('Reading catalog', catfn)
        T = fits_table(catfn)
        T.cut(T.brick_primary)
        print('File', catfn, 'cut to', len(T), 'primary')
        ok,xx,yy = wcs.radec2pixelxy(T.ra, T.dec)
        T.cut((xx > 0) * (yy > 0) * (xx < W) * (yy < H))
        cat.append(T)
        if hdr is None:
            hdr = T.get_header()
    if len(cat) == 0:
        cat = None
    else:
        cat = merge_tables(cat, columns='fillzero')

    return cat,hdr
Example #36
def get_DR5_ccds(bricknames):
    path = '/global/cscratch1/sd/desiproc/DR5_out/'
    T = []
    for brick in bricknames:
        bri = brick[:3]
        ccd_fn = os.path.join(
            path,
            'coadd/%s/%s/legacysurvey-%s-ccds.fits' % (bri, brick, brick))
        try:
            t = fits_table(ccd_fn)
            t.set('brickname', np.array([brick] * len(t)))
            T.append(t)
            #ccd_fns.append(os.path.join(path,
            #                            'coadd/%s/%s/legacysurvey-%s-ccds.fits' %
            #                            (bri,brick,brickv))
        except IOError:
            print('not found: %s' % ccd_fn)
    TT = merge_tables(T, columns='fillzero')
    del T
    savefn = 'brick_allccds.fits'
    TT.writeto(savefn)
    print('Wrote %s' % savefn)
Example #37
def main_serial(doWhat=None, derived_dir=None, randoms_subset=False):
    """merges the rank tables that are stored in merge_tmp/"""
    saveDir = dir_for_serial(derived_dir)
    try:
        os.makedirs(saveDir)
    except OSError:
        pass

    if doWhat == 'randoms':
        wild = "randoms_rank*.fits"
        if randoms_subset:
            outfn = os.path.join(saveDir, 'randoms_subset.fits')
        else:
            outfn = os.path.join(saveDir, 'randoms.fits')
    elif doWhat == 'summary':
        wild = "summary_rank*.fits"
        outfn = os.path.join(saveDir, "summary.fits")
    else:
        raise ValueError('unknown doWhat: %s' % doWhat)

    if os.path.exists(outfn):
        print('Merged table already exists %s' % outfn)
        return

    search = os.path.join(dir_for_mpi(derived_dir), wild)
    tab_fns = glob(search)
    if len(tab_fns) == 0:
        raise ValueError('found nothing with search: %s' % search)
    tabs = []
    for fn in tab_fns:
        if randoms_subset:
            T = fits_table_cols(fn)
        else:
            T = fits_table(fn)
        tabs.append(T)
    print('Merging %d tables' % len(tabs))
    tab = merge_tables(tabs, columns='fillzero')
    tab.writeto(outfn)
    print('Wrote %s' % outfn)
    print('has %d rows' % len(tab))
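# Hedged usage sketch: derived_dir is a placeholder, and dir_for_serial,
# dir_for_mpi and fits_table_cols are assumed to be defined elsewhere in the
# original module.
def example_main_serial():
    # Merge the per-rank randoms tables, then the per-rank summary tables.
    main_serial(doWhat='randoms', derived_dir='/path/to/derived',
                randoms_subset=False)
    main_serial(doWhat='summary', derived_dir='/path/to/derived')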
Example #38
    def get_annotated_ccds(self):
        '''
        Returns the annotated table of CCDs.
        '''
        from glob import glob

        fns = self.find_file('annotated-ccds')
        TT = []
        for fn in fns:
            print('Reading annotated CCDs from', fn)
            T = fits_table(fn)
            print('Got', len(T), 'CCDs')
            TT.append(T)
        T = merge_tables(TT, columns='fillzero')
        print('Total of', len(T), 'CCDs')
        del TT
        # Remove trailing spaces from 'ccdname' column
        if 'ccdname' in T.columns():
            # "N4 " -> "N4"
            T.ccdname = np.array([s.strip() for s in T.ccdname])
        # Remove trailing spaces from 'camera' column.
        T.camera = np.array([c.strip() for c in T.camera])
        return T
Example #39
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-o',
                        '--out',
                        dest='outfn',
                        help='Output filename',
                        default='TMP/nexp.fits')
    parser.add_argument('--merge',
                        action='store_true',
                        help='Merge sub-tables')
    parser.add_argument('--plot', action='store_true', help='Plot results')
    parser.add_argument('files',
                        metavar='nexp-file.fits.gz',
                        nargs='+',
                        help='List of nexp files to process')

    opt = parser.parse_args()

    fns = opt.files

    if opt.merge:
        from astrometry.util.fits import merge_tables
        TT = []
        for fn in fns:
            T = fits_table(fn)
            print(fn, '->', len(T))
            TT.append(T)
        T = merge_tables(TT)
        T.writeto(opt.outfn)
        print('Wrote', opt.outfn)

    if opt.plot:
        T = fits_table(opt.files[0])
        import pylab as plt
        import matplotlib

        ax = [360, 0, -21, 36]

        def radec_plot():
            plt.axis(ax)
            plt.xlabel('RA (deg)')
            plt.xticks(np.arange(0, 361, 45))
            plt.ylabel('Dec (deg)')

            gl = np.arange(361)
            gb = np.zeros_like(gl)
            from astrometry.util.starutil_numpy import lbtoradec
            rr, dd = lbtoradec(gl, gb)
            plt.plot(rr, dd, 'k-', alpha=0.5, lw=1)
            rr, dd = lbtoradec(gl, gb + 10)
            plt.plot(rr, dd, 'k-', alpha=0.25, lw=1)
            rr, dd = lbtoradec(gl, gb - 10)
            plt.plot(rr, dd, 'k-', alpha=0.25, lw=1)

        plt.figure(figsize=(8, 5))
        plt.subplots_adjust(left=0.1, right=0.98, top=0.93)

        # Map of the tile centers we want to observe...
        O = fits_table('obstatus/decam-tiles_obstatus.fits')
        O.cut(O.in_desi == 1)
        rr, dd = np.meshgrid(np.linspace(ax[1], ax[0], 700),
                             np.linspace(ax[2], ax[3], 200))
        from astrometry.libkd.spherematch import match_radec
        I, J, d = match_radec(O.ra, O.dec, rr.ravel(), dd.ravel(), 1.)
        desimap = np.zeros(rr.shape, bool)
        desimap.flat[J] = True

        def desi_map():
            # Show the DESI tile map in the background.
            from astrometry.util.plotutils import antigray
            plt.imshow(desimap,
                       origin='lower',
                       interpolation='nearest',
                       extent=[ax[1], ax[0], ax[2], ax[3]],
                       aspect='auto',
                       cmap=antigray,
                       vmax=8)

        for band in 'grz':
            plt.clf()
            desi_map()
            N = T.get('nexp_%s' % band)
            I = np.flatnonzero(N > 0)
            #cm = matplotlib.cm.get_cmap('jet', 6)
            #cm = matplotlib.cm.get_cmap('winter', 5)
            cm = matplotlib.cm.viridis
            cm = matplotlib.cm.get_cmap(cm, 5)
            plt.scatter(T.ra[I],
                        T.dec[I],
                        c=N[I],
                        s=2,
                        edgecolors='none',
                        vmin=0.5,
                        vmax=5.5,
                        cmap=cm)
            radec_plot()
            cax = colorbar_axes(plt.gca(), frac=0.06)
            plt.colorbar(cax=cax, ticks=range(6))
            #plt.colorbar(ticks=range(6))
            plt.title('DECaLS DR3: Number of exposures in %s' % band)
            plt.savefig('nexp-%s.png' % band)

            plt.clf()
            desi_map()
            plt.scatter(T.ra,
                        T.dec,
                        c=T.get('psfsize_%s' % band),
                        s=2,
                        edgecolors='none',
                        vmin=0,
                        vmax=2.)
            radec_plot()
            plt.colorbar()
            plt.title('DECaLS DR3: PSF size, band %s' % band)
            plt.savefig('psfsize-%s.png' % band)

        return 0

        for col in ['nobjs', 'npsf', 'nsimp', 'nexp', 'ndev', 'ncomp']:
            plt.clf()
            desi_map()
            N = T.get(col)
            mx = np.percentile(N, 99.5)
            plt.scatter(T.ra,
                        T.dec,
                        c=N,
                        s=2,
                        edgecolors='none',
                        vmin=0,
                        vmax=mx)
            radec_plot()
            plt.colorbar()
            plt.title('DECaLS DR3: Number of objects of type %s' % col[1:])
            plt.savefig('nobjs-%s.png' % col[1:])

        Ntot = T.nobjs
        for col in ['npsf', 'nsimp', 'nexp', 'ndev', 'ncomp']:
            plt.clf()
            desi_map()
            N = T.get(col) / Ntot.astype(np.float32)
            mx = np.percentile(N, 99.5)
            plt.scatter(T.ra,
                        T.dec,
                        c=N,
                        s=2,
                        edgecolors='none',
                        vmin=0,
                        vmax=mx)
            radec_plot()
            plt.colorbar()
            plt.title('DECaLS DR3: Fraction of objects of type %s' % col[1:])
            plt.savefig('fobjs-%s.png' % col[1:])

        return 0

    # fnpats = opt.files
    # fns = []
    # for pat in fnpats:
    #     pfns = glob(pat)
    #     fns.extend(pfns)
    #     print('Pattern', pat, '->', len(pfns), 'files')
    #fns = glob('coadd/*/*/*-nexp*')
    #fns = glob('coadd/000/*/*-nexp*')
    #fns = glob('coadd/000/0001*/*-nexp*')
    fns.sort()
    print(len(fns), 'nexp files')

    brickset = set()
    bricklist = []
    gn = []
    rn = []
    zn = []

    gnhist = []
    rnhist = []
    znhist = []

    nnhist = 6

    gdepth = []
    rdepth = []
    zdepth = []

    ibricks = []
    nsrcs = []
    npsf = []
    nsimp = []
    nexp = []
    ndev = []
    ncomp = []

    gpsfsize = []
    rpsfsize = []
    zpsfsize = []
    ebv = []
    gtrans = []
    rtrans = []
    ztrans = []

    bricks = fits_table('survey-bricks.fits.gz')

    #sfd = SFDMap()

    W = H = 3600
    # H=3600
    # xx,yy = np.meshgrid(np.arange(W), np.arange(H))
    unique = np.ones((H, W), bool)
    tlast = 0

    for fn in fns:
        print('File', fn)
        words = fn.split('/')
        dirprefix = '/'.join(words[:-4])
        print('Directory prefix:', dirprefix)
        words = words[-4:]
        brick = words[2]
        print('Brick', brick)
        if not brick in brickset:
            brickset.add(brick)
            bricklist.append(brick)
            gn.append(0)
            rn.append(0)
            zn.append(0)

            gnhist.append([0 for i in range(nnhist)])
            rnhist.append([0 for i in range(nnhist)])
            znhist.append([0 for i in range(nnhist)])

            index = -1
            ibrick = np.nonzero(bricks.brickname == brick)[0][0]
            ibricks.append(ibrick)
            tfn = os.path.join(dirprefix, 'tractor', brick[:3],
                               'tractor-%s.fits' % brick)
            print('Tractor filename', tfn)
            T = fits_table(tfn,
                           columns=[
                               'brick_primary', 'type', 'decam_psfsize', 'ebv',
                               'decam_mw_transmission'
                           ])
            T.cut(T.brick_primary)
            nsrcs.append(len(T))
            types = Counter([t.strip() for t in T.type])
            npsf.append(types['PSF'])
            nsimp.append(types['SIMP'])
            nexp.append(types['EXP'])
            ndev.append(types['DEV'])
            ncomp.append(types['COMP'])
            print('N sources', nsrcs[-1])

            gpsfsize.append(np.median(T.decam_psfsize[:, 1]))
            rpsfsize.append(np.median(T.decam_psfsize[:, 2]))
            zpsfsize.append(np.median(T.decam_psfsize[:, 4]))

            ebv.append(np.median(T.ebv))
            gtrans.append(np.median(T.decam_mw_transmission[:, 1]))
            rtrans.append(np.median(T.decam_mw_transmission[:, 2]))
            ztrans.append(np.median(T.decam_mw_transmission[:, 4]))

            br = bricks[ibrick]

            print('Computing unique brick pixels...')
            #wcs = Tan(fn, 0)
            #W,H = int(wcs.get_width()), int(wcs.get_height())

            pixscale = 0.262 / 3600.
            wcs = Tan(br.ra, br.dec, W / 2. + 0.5, H / 2. + 0.5, -pixscale, 0.,
                      0., pixscale, float(W), float(H))
            import time

            t0 = time.clock()

            unique[:, :] = True

            find_unique_pixels(wcs, W, H, unique, br.ra1, br.ra2, br.dec1,
                               br.dec2)

            # for i in range(W/2):
            #     allin = True
            #     lo,hi = i, W-i-1
            #     # one slice per side
            #     side = slice(lo,hi+1)
            #     top = (lo, side)
            #     bot = (hi, side)
            #     left  = (side, lo)
            #     right = (side, hi)
            #     for slc in [top, bot, left, right]:
            #         #print('xx,yy', xx[slc], yy[slc])
            #         rr,dd = wcs.pixelxy2radec(xx[slc]+1, yy[slc]+1)
            #         U = (rr >= br.ra1 ) * (rr < br.ra2 ) * (dd >= br.dec1) * (dd < br.dec2)
            #         #print('Pixel', i, ':', np.sum(U), 'of', len(U), 'pixels are unique')
            #         allin *= np.all(U)
            #         unique[slc] = U
            #     if allin:
            #         print('Scanned to pixel', i)
            #         break

            t1 = time.clock()
            U = np.flatnonzero(unique)
            t2 = time.clock()
            print(len(U), 'of', W * H, 'pixels are unique to this brick')

            # Brute-force cross-check and timing scaffolding, kept for reference:
            #t3 = time.clock()
            #rr,dd = wcs.pixelxy2radec(xx+1, yy+1)
            #t4 = time.clock()
            #u = (rr >= br.ra1 ) * (rr < br.ra2 ) * (dd >= br.dec1) * (dd < br.dec2)
            #t5 = time.clock()
            #U2 = np.flatnonzero(u)
            #U2 = np.flatnonzero((rr >= br.ra1 ) * (rr < br.ra2 ) *
            #                    (dd >= br.dec1) * (dd < br.dec2))
            #assert(np.all(U == U2))
            #assert(len(U) == len(U2))
            #t6 = time.clock()
            #print(len(U2), 'of', W*H, 'pixels are unique to this brick')

            #print(t0-tlast, 'other time')
            #tlast = time.clock() #t2
            #print('t1:', t1-t0, 't2', t2-t1)
            #print('t4:', t4-t3, 't5', t5-t4, 't6', t6-t5)
        index = bricklist.index(brick)
        assert (index == len(bricklist) - 1)

        filepart = words[-1]
        filepart = filepart.replace('.fits.gz', '')
        print('File:', filepart)
        band = filepart[-1]
        assert (band in 'grz')

        nlist, nhist = dict(g=(gn, gnhist), r=(rn, rnhist),
                            z=(zn, znhist))[band]

        upix = fitsio.read(fn).flat[U]
        med = np.median(upix)
        print('Band', band, ': Median', med)
        nlist[index] = med

        hist = nhist[index]
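        # Exposure-count histogram: bin i counts unique-brick pixels covered
        # by exactly i exposures, and the last bin accumulates everything
        # with >= (nnhist - 1) exposures.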
        for i in range(nnhist):
            if i < nnhist - 1:
                hist[i] = np.sum(upix == i)
            else:
                hist[i] = np.sum(upix >= i)
        assert (sum(hist) == len(upix))
        print('Number of exposures histogram:', hist)

    ibricks = np.array(ibricks)

    print('Maximum number of sources:', max(nsrcs))

    T = fits_table()
    T.brickname = np.array(bricklist)
    T.ra = bricks.ra[ibricks]
    T.dec = bricks.dec[ibricks]
    T.nexp_g = np.array(gn).astype(np.int16)
    T.nexp_r = np.array(rn).astype(np.int16)
    T.nexp_z = np.array(zn).astype(np.int16)
    T.nexphist_g = np.array(gnhist).astype(np.int32)
    T.nexphist_r = np.array(rnhist).astype(np.int32)
    T.nexphist_z = np.array(znhist).astype(np.int32)
    T.nobjs = np.array(nsrcs).astype(np.int16)
    T.npsf = np.array(npsf).astype(np.int16)
    T.nsimp = np.array(nsimp).astype(np.int16)
    T.nexp = np.array(nexp).astype(np.int16)
    T.ndev = np.array(ndev).astype(np.int16)
    T.ncomp = np.array(ncomp).astype(np.int16)
    T.psfsize_g = np.array(gpsfsize).astype(np.float32)
    T.psfsize_r = np.array(rpsfsize).astype(np.float32)
    T.psfsize_z = np.array(zpsfsize).astype(np.float32)
    T.ebv = np.array(ebv).astype(np.float32)
    T.trans_g = np.array(gtrans).astype(np.float32)
    T.trans_r = np.array(rtrans).astype(np.float32)
    T.trans_z = np.array(ztrans).astype(np.float32)
    T.writeto(opt.outfn)
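
A minimal sketch, assuming the output written above is named 'brick-summary.fits' (hypothetical stand-in for opt.outfn) and using the same fits_table reader as the rest of these examples, of inspecting the per-brick summary table:

import numpy as np
from astrometry.util.fits import fits_table

summary = fits_table('brick-summary.fits')   # hypothetical filename for opt.outfn
print(len(summary), 'bricks summarized')
# Columns written above: nexp_*, nexphist_*, nobjs, npsf, psfsize_*, ebv, trans_*.
print('median nexp_g:', np.median(summary.nexp_g))
print('median psfsize_g: %.2f arcsec' % np.median(summary.psfsize_g))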
Example #40
def main(survey=None, opt=None):
    '''Driver function for forced photometry of individual Legacy
    Survey images.
    '''
    print(' '.join(sys.argv))
    if opt is None:
        parser = get_parser()
        opt = parser.parse_args()

    Time.add_measurement(MemMeas)
    t0 = tlast = Time()

    if opt.skip and os.path.exists(opt.outfn):
        print('Output file exists:', opt.outfn)
        sys.exit(0)

    if opt.derivs and opt.agn:
        print('Sorry, can\'t do --derivs AND --agn')
        sys.exit(0)

    if not opt.forced:
        opt.apphot = True

    zoomslice = None
    if opt.zoom is not None:
        (x0, x1, y0, y1) = opt.zoom
        zoomslice = (slice(y0, y1), slice(x0, x1))
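    # Note: --zoom gives pixel bounds (x0, x1, y0, y1); the slice built above
    # is (rows, cols) = (y, x), following numpy image indexing.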

    ps = None
    if opt.plots is not None:
        from astrometry.util.plotutils import PlotSequence
        ps = PlotSequence(opt.plots)

    # Try parsing first arg as exposure number (otherwise, it's a filename)
    try:
        expnum = int(opt.expnum)
        filename = None
    except (ValueError, TypeError):
        # not an integer: pass expnum=None to survey.find_ccds() and treat
        # the argument as a filename instead
        expnum = None
        filename = opt.expnum

    # Try parsing HDU: "all" or HDU name or HDU number.
    all_hdus = (opt.ccdname == 'all')
    hdu = -1
    ccdname = None
    if not all_hdus:
        try:
            hdu = int(opt.ccdname)
        except (ValueError, TypeError):
            ccdname = opt.ccdname

    if survey is None:
        survey = LegacySurveyData(survey_dir=opt.survey_dir)

    catsurvey_north = survey
    catsurvey_south = None

    if opt.catalog_dir_north is not None:
        assert (opt.catalog_dir_south is not None)
        assert (opt.catalog_resolve_dec_ngc is not None)
        catsurvey_north = LegacySurveyData(survey_dir=opt.catalog_dir_north)
        catsurvey_south = LegacySurveyData(survey_dir=opt.catalog_dir_south)

    if opt.catalog_dir is not None:
        catsurvey_north = LegacySurveyData(survey_dir=opt.catalog_dir)

    if filename is not None and hdu >= 0:
        # FIXME -- try looking up in CCDs file?
        # Read metadata from file
        print('Warning: faking metadata from file contents')
        T = exposure_metadata([filename], hdus=[hdu])
        print('Metadata:')
        T.about()

        if 'ccdzpt' not in T.columns():
            phdr = fitsio.read_header(filename)
            T.ccdzpt = np.array([phdr['MAGZERO']])
            print('WARNING: using header MAGZERO')
            T.ccdraoff = np.array([0.])
            T.ccddecoff = np.array([0.])
            print('WARNING: setting CCDRAOFF, CCDDECOFF to zero.')

    else:
        # Read metadata from survey-ccds.fits table
        T = survey.find_ccds(expnum=expnum, ccdname=ccdname)
        print(len(T), 'with expnum', expnum, 'and ccdname', ccdname)
        if hdu >= 0:
            T.cut(T.image_hdu == hdu)
            print(len(T), 'with HDU', hdu)
        if filename is not None:
            T.cut(np.array([f.strip() == filename for f in T.image_filename]))
            print(len(T), 'with filename', filename)
        if opt.camera is not None:
            T.cut(T.camera == opt.camera)
            print(len(T), 'with camera', opt.camera)
        if not all_hdus:
            assert (len(T) == 1)

    args = []
    for ccd in T:
        args.append((survey, catsurvey_north, catsurvey_south,
                     opt.catalog_resolve_dec_ngc, ccd, opt, zoomslice, ps))

    if opt.threads:
        from astrometry.util.multiproc import multiproc
        from astrometry.util.timingpool import TimingPool, TimingPoolMeas
        pool = TimingPool(opt.threads)
        poolmeas = TimingPoolMeas(pool, pickleTraffic=False)
        Time.add_measurement(poolmeas)
        mp = multiproc(None, pool=pool)
        tm = Time()
        FF = mp.map(bounce_one_ccd, args)
        print('Multi-processing forced-phot:', Time() - tm)
    else:
        FF = map(bounce_one_ccd, args)

    FF = [F for F in FF if F is not None]
    if len(FF) == 0:
        print('No photometry results to write.')
        return 0
    # Keep only the first header
    _, version_hdr = FF[0]
    FF = [F for F, hdr in FF]
    F = merge_tables(FF)

    if all_hdus:
        version_hdr.delete('CPHDU')
        version_hdr.delete('CCDNAME')

    units = {
        'exptime': 'sec',
        'flux': 'nanomaggy',
        'flux_ivar': '1/nanomaggy^2',
        'apflux': 'nanomaggy',
        'apflux_ivar': '1/nanomaggy^2',
        'psfdepth': '1/nanomaggy^2',
        'galdepth': '1/nanomaggy^2',
        'sky': 'nanomaggy/arcsec^2',
        'psfsize': 'arcsec'
    }
    if opt.derivs:
        units.update({
            'dra': 'arcsec',
            'ddec': 'arcsec',
            'dra_ivar': '1/arcsec^2',
            'ddec_ivar': '1/arcsec^2'
        })

    columns = F.get_columns()
    order = [
        'release', 'brickid', 'brickname', 'objid', 'camera', 'expnum',
        'ccdname', 'filter', 'mjd', 'exptime', 'psfsize', 'ccd_cuts',
        'airmass', 'sky', 'psfdepth', 'galdepth', 'ra', 'dec', 'flux',
        'flux_ivar', 'fracflux', 'rchisq', 'fracmasked', 'apflux',
        'apflux_ivar', 'x', 'y', 'dqmask', 'dra', 'ddec', 'dra_ivar',
        'ddec_ivar'
    ]
    columns = [c for c in order if c in columns]

    # Set units headers (must happen after column ordering is set!)
    hdr = fitsio.FITSHDR()
    for i, col in enumerate(columns):
        if col in units:
            hdr.add_record(dict(name='TUNIT%i' % (i + 1), value=units[col]))

    outdir = os.path.dirname(opt.outfn)
    if len(outdir):
        trymakedirs(outdir)
    tmpfn = os.path.join(outdir, 'tmp-' + os.path.basename(opt.outfn))
    fitsio.write(tmpfn, None, header=version_hdr, clobber=True)
    F.writeto(tmpfn, header=hdr, append=True, columns=columns)
    os.rename(tmpfn, opt.outfn)
    print('Wrote', opt.outfn)

    tnow = Time()
    print('Total:', tnow - t0)
    return 0
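
A minimal sketch of reading back the forced-photometry file written above, assuming fitsio and a hypothetical output name 'forced.fits'; HDU 0 holds the version header and HDU 1 the photometry table with its TUNITi unit cards:

import fitsio

fn = 'forced.fits'   # hypothetical value of opt.outfn
version_hdr = fitsio.read_header(fn, ext=0)
F, hdr = fitsio.read(fn, ext=1, header=True)
print(len(F), 'forced-photometry rows')
for i, col in enumerate(F.dtype.names):
    key = 'TUNIT%i' % (i + 1)
    if key in hdr:
        # Units written per-column by the code above (e.g. flux in nanomaggy).
        print(col, 'has units', hdr[key])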
Example #41
            meas = measure_raw_mosaic3(fn, ext=extstring, n_fwhm=1)
            #print('Measurement:', meas.keys())
            M = fits_table()
            for k in [
                    'airmass', 'extension', 'pixscale', 'nmatched', 'ra_ccd',
                    'dec_ccd', 'band', 'zp', 'rawsky', 'ndetected',
                    'skybright', 'dy', 'transparency', 'seeing', 'dx',
                    'exptime', 'zp_skysub', 'zp_med', 'zp_med_skysub', 'affine'
            ]:
                M.set(k, np.array([meas[k]]))
            M.filename = np.array([fn])
            M.extname = np.array([extstring])
            phdr = meas['primhdr']
            M.expnum = np.array([phdr['EXPNUM']])
            MM.append(M)
    M = merge_tables(MM)

    M.writeto(cofn)

C = fits_table(cofn)

from camera_mosaic import nominal_cal

nom = nominal_cal
C.extension = np.array([ext.strip() for ext in C.extension])
CDs = dict([(ext, nom.cdmatrix(ext)) for ext in np.unique(C.extension)])
C.cd = np.array([CDs[ext] for ext in C.extension])
C.dra = (C.cd[:, 0] * C.dx + C.cd[:, 1] * C.dy) * 3600.
C.ddec = (C.cd[:, 2] * C.dx + C.cd[:, 3] * C.dy) * 3600.
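# The nominal per-extension CD matrix maps the measured pixel offsets (dx, dy)
# into tangent-plane angular offsets; the factor of 3600 converts degrees to
# arcseconds.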

if 'expnum' not in C.columns():
Example #42
 def brick_cat_from_overlap(self,
                            surveyObj,
                            margin=10.,
                            purge_duplicates=True):
     '''
     Return a catalog of sources within bricks touching the overlap region 
     between the BASS and DECaLS CCDs. Does not cut sources to the overlap 
     region. 
     If purge_duplicates=True then duplicates (i.e. common sources from
     overlapping bricks) are removed based on depth.
     '''
     brks = self.bricks_in_overlap(surveyObj, margin=margin)
     if not len(brks):
         return None
     tractorDir = surveyObj.find_file('tractor').split('/')
     tractorDir = '/'.join(tractorDir[0:-2])
     tabs = []
     # merge brick catalogs
     for brk in brks:
         path = os.path.join(tractorDir, brk.brickname[0:3],
                             'tractor-' + brk.brickname + '.fits')
         tabs.append(fits_table(path))
     if len(tabs) == 1: return tabs[0]
     tab = merge_tables(tabs)
     if not (purge_duplicates):
         return tab
     # find groups of connected sources within a small radius
     # this is obviously not an exact method!
     grps = cluster_radec(tab.ra, tab.dec, 0.3 / 3600.)
     keep = np.ones(len(tab), dtype=bool)
     for grp in grps:
         brknms = tab.brickname[grp]
         # TODO a small number of srcs in the same brick will be
         # selected as duplicates!
         if len(grp) > 2:
             # if there are more than 2 sources in a group then call
             # the closest 2 duplicates
             ra = tab.ra[grp]
             dec = tab.dec[grp]
             m1, m2, d12 = match_radec(ra,
                                       dec,
                                       ra,
                                       dec,
                                       0.3 / 3600.,
                                       notself=True,
                                       nearest=True)
             i = np.argmin(d12)
             inxa = m1[i]
             inxb = m2[i]
             # chuck the one with larger ivar
             try:
                 dpth = np.array([
                     tab.get('psfdepth_' + self.filter)[inxa],
                     tab.get('psfdepth_' + self.filter)[inxb]
                 ])
             except KeyError:
                 b = 'ugrizY'.find(self.filter)
                 dpth = np.array([
                     tab.get('decam_depth')[inxa, b],
                     tab.get('decam_depth')[inxb, b]
                 ])
             j = np.argmin(dpth)
             keep[grp[np.array([inxa, inxb])[j]]] = False
         else:
             try:
                 dpth = tab.get('psfdepth_' + self.filter)[grp]
             except KeyError:
                 b = 'ugrizY'.find(self.filter)
                 dpth = np.array([
                     tab.get('decam_depth')[grp[0], b],
                     tab.get('decam_depth')[grp[1], b]
                 ])
             j = np.argmin(dpth)
             keep[grp[j]] = False
     if np.any(~keep):
         return tab[keep]
     return tab
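
A minimal standalone sketch of the depth-based tie-breaking used above: for each matched pair of duplicate detections, flag the shallower one (the smaller depth value) for removal. The arrays below are illustrative; in the real method the pairs come from cluster_radec() groups and the depths from the psfdepth_<band> or decam_depth columns.

import numpy as np

pairs = np.array([[0, 5], [2, 7]])   # hypothetical duplicate pairs (catalog indices)
depth = np.array([22.0, 21.5, 20.8, 23.0, 19.0, 21.9, 22.4, 21.1])

keep = np.ones(len(depth), dtype=bool)
for a, b in pairs:
    # Drop the shallower member of each duplicate pair.
    keep[a if depth[a] < depth[b] else b] = False
print(np.flatnonzero(~keep))   # -> [2 5]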
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--name1', help='Name for first data set')
    parser.add_argument('--name2', help='Name for second data set')
    parser.add_argument('--plot-prefix',
                        default='compare',
                        help='Prefix for plot filenames; default "%(default)s"')
    parser.add_argument('--match',
                        default=1.0, type=float,
                        help='Astrometric cross-match distance in arcsec')
    parser.add_argument('dir1', help='First directory to compare')
    parser.add_argument('dir2', help='Second directory to compare')

    opt = parser.parse_args()

    ps = PlotSequence(opt.plot_prefix)

    name1 = opt.name1
    if name1 is None:
        name1 = os.path.basename(opt.dir1)
        if not len(name1):
            name1 = os.path.basename(os.path.dirname(opt.dir1))
    name2 = opt.name2
    if name2 is None:
        name2 = os.path.basename(opt.dir2)
        if not len(name2):
            name2 = os.path.basename(os.path.dirname(opt.dir2))
    tt = 'Comparing %s to %s' % (name1, name2)

    # regex for tractor-*.fits catalog filename
    catre = re.compile(r'tractor-.*\.fits')

    cat1, cat2 = [], []
    for basedir, cat in [(opt.dir1, cat1), (opt.dir2, cat2)]:
        for dirpath, dirnames, filenames in os.walk(basedir, followlinks=True):
            for fn in filenames:
                if not catre.match(fn):
                    print('Skipping', fn, 'due to filename')
                    continue
                fn = os.path.join(dirpath, fn)
                t = fits_table(fn)
                print(len(t), 'from', fn)
                cat.append(t)
    cat1 = merge_tables(cat1, columns='fillzero')
    cat2 = merge_tables(cat2, columns='fillzero')
    print('Total of', len(cat1), 'from', name1)
    print('Total of', len(cat2), 'from', name2)
    cat1.cut(cat1.brick_primary)
    cat2.cut(cat2.brick_primary)
    print('Total of', len(cat1), 'BRICK_PRIMARY from', name1)
    print('Total of', len(cat2), 'BRICK_PRIMARY from', name2)

    cat1.cut((cat1.decam_anymask[:, 1] == 0) *
             (cat1.decam_anymask[:, 2] == 0) * (cat1.decam_anymask[:, 4] == 0))
    cat2.cut((cat2.decam_anymask[:, 1] == 0) *
             (cat2.decam_anymask[:, 2] == 0) * (cat2.decam_anymask[:, 4] == 0))
    print('Total of', len(cat1), 'unmasked from', name1)
    print('Total of', len(cat2), 'unmasked from', name2)

    I, J, d = match_radec(cat1.ra,
                          cat1.dec,
                          cat2.ra,
                          cat2.dec,
                          opt.match / 3600.,
                          nearest=True)
    print(len(I), 'matched')

    plt.clf()
    plt.hist(d * 3600., 100)
    plt.xlabel('Match distance (arcsec)')
    plt.title(tt)
    ps.savefig()

    matched1 = cat1[I]
    matched2 = cat2[J]
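    # The decam_* arrays are indexed by band as [u, g, r, i, z, Y], hence the
    # (1, 2, 4) indices used below for g, r and z.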

    for iband, band, cc in [(1, 'g', 'g'), (2, 'r', 'r'), (4, 'z', 'm')]:
        K = np.flatnonzero((matched1.decam_flux_ivar[:, iband] > 0) *
                           (matched2.decam_flux_ivar[:, iband] > 0))

        print('Median mw_trans', band, 'is',
              np.median(matched1.decam_mw_transmission[:, iband]))

        plt.clf()
        plt.errorbar(
            matched1.decam_flux[K, iband],
            matched2.decam_flux[K, iband],
            fmt='.',
            color=cc,
            xerr=1. / np.sqrt(matched1.decam_flux_ivar[K, iband]),
            yerr=1. / np.sqrt(matched2.decam_flux_ivar[K, iband]),
            alpha=0.1,
        )
        plt.xlabel('%s flux: %s' % (name1, band))
        plt.ylabel('%s flux: %s' % (name2, band))
        plt.plot([-1e6, 1e6], [-1e6, 1e6], 'k-', alpha=1.)
        plt.axis([-100, 1000, -100, 1000])
        plt.title(tt)
        ps.savefig()

    for iband, band, cc in [(1, 'g', 'g'), (2, 'r', 'r'), (4, 'z', 'm')]:
        good = ((matched1.decam_flux_ivar[:, iband] > 0) *
                (matched2.decam_flux_ivar[:, iband] > 0))
        K = np.flatnonzero(good)
        psf1 = (matched1.type == 'PSF ')
        psf2 = (matched2.type == 'PSF ')
        P = np.flatnonzero(good * psf1 * psf2)

        mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
            matched1.decam_flux[:, iband], matched1.decam_flux_ivar[:, iband])

        iv1 = matched1.decam_flux_ivar[:, iband]
        iv2 = matched2.decam_flux_ivar[:, iband]
        std = np.sqrt(1. / iv1 + 1. / iv2)

        plt.clf()
        plt.plot(
            mag1[K],
            (matched2.decam_flux[K, iband] - matched1.decam_flux[K, iband]) /
            std[K],
            '.',
            alpha=0.1,
            color=cc)
        plt.plot(
            mag1[P],
            (matched2.decam_flux[P, iband] - matched1.decam_flux[P, iband]) /
            std[P],
            '.',
            alpha=0.1,
            color='k')
        plt.ylabel('(%s - %s) flux / flux errors (sigma): %s' %
                   (name2, name1, band))
        plt.xlabel('%s mag: %s' % (name1, band))
        plt.axhline(0, color='k', alpha=0.5)
        plt.axis([24, 16, -10, 10])
        plt.title(tt)
        ps.savefig()

    plt.clf()
    lp, lt = [], []
    for iband, band, cc in [(1, 'g', 'g'), (2, 'r', 'r'), (4, 'z', 'm')]:
        good = ((matched1.decam_flux_ivar[:, iband] > 0) *
                (matched2.decam_flux_ivar[:, iband] > 0))
        #good = True
        psf1 = (matched1.type == 'PSF ')
        psf2 = (matched2.type == 'PSF ')
        mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
            matched1.decam_flux[:, iband], matched1.decam_flux_ivar[:, iband])
        iv1 = matched1.decam_flux_ivar[:, iband]
        iv2 = matched2.decam_flux_ivar[:, iband]
        std = np.sqrt(1. / iv1 + 1. / iv2)
        #std = np.hypot(std, 0.01)
        G = np.flatnonzero(good * psf1 * psf2 * np.isfinite(mag1) *
                           (mag1 >= 20) *
                           (mag1 < dict(g=24, r=23.5, z=22.5)[band]))

        n, b, p = plt.hist(
            (matched2.decam_flux[G, iband] - matched1.decam_flux[G, iband]) /
            std[G],
            range=(-4, 4),
            bins=50,
            histtype='step',
            color=cc,
            density=True)

        sig = (matched2.decam_flux[G, iband] -
               matched1.decam_flux[G, iband]) / std[G]
        print('Raw mean and std of points:', np.mean(sig), np.std(sig))
        med = np.median(sig)
        rsigma = (np.percentile(sig, 84) - np.percentile(sig, 16)) / 2.
        print('Median and percentile-based sigma:', med, rsigma)
        lp.append(p[0])
        lt.append('%s: %.2f +- %.2f' % (band, med, rsigma))

    bins = []
    gaussint = []
    for blo, bhi in zip(b, b[1:]):
        c = scipy.stats.norm.cdf(bhi) - scipy.stats.norm.cdf(blo)
        c /= (bhi - blo)
        #bins.extend([blo,bhi])
        #gaussint.extend([c,c])
        bins.append((blo + bhi) / 2.)
        gaussint.append(c)
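    # Unit-Gaussian overlay: probability per bin (CDF difference) divided by
    # the bin width, so it is directly comparable to the density-normalized
    # histograms above.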
    plt.plot(bins, gaussint, 'k-', lw=2, alpha=0.5)

    plt.title(tt)
    plt.xlabel('Flux difference / error (sigma)')
    plt.axvline(0, color='k', alpha=0.1)
    plt.ylim(0, 0.45)
    plt.legend(lp, lt, loc='upper right')
    ps.savefig()

    for iband, band, cc in [(1, 'g', 'g'), (2, 'r', 'r'), (4, 'z', 'm')]:
        plt.clf()
        mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
            matched1.decam_flux[:, iband], matched1.decam_flux_ivar[:, iband])
        mag2, magerr2 = NanoMaggies.fluxErrorsToMagErrors(
            matched2.decam_flux[:, iband], matched2.decam_flux_ivar[:, iband])

        meanmag = NanoMaggies.nanomaggiesToMag(
            (matched1.decam_flux[:, iband] + matched2.decam_flux[:, iband]) /
            2.)

        psf1 = (matched1.type == 'PSF ')
        psf2 = (matched2.type == 'PSF ')
        good = ((matched1.decam_flux_ivar[:, iband] > 0) *
                (matched2.decam_flux_ivar[:, iband] > 0) * np.isfinite(mag1) *
                np.isfinite(mag2))
        K = np.flatnonzero(good)
        P = np.flatnonzero(good * psf1 * psf2)

        plt.errorbar(mag1[K],
                     mag2[K],
                     fmt='.',
                     color=cc,
                     xerr=magerr1[K],
                     yerr=magerr2[K],
                     alpha=0.1)
        plt.plot(mag1[P], mag2[P], 'k.', alpha=0.5)
        plt.xlabel('%s %s (mag)' % (name1, band))
        plt.ylabel('%s %s (mag)' % (name2, band))
        plt.plot([-1e6, 1e6], [-1e6, 1e6], 'k-', alpha=1.)
        plt.axis([24, 16, 24, 16])
        plt.title(tt)
        ps.savefig()

        plt.clf()
        plt.errorbar(mag1[K],
                     mag2[K] - mag1[K],
                     fmt='.',
                     color=cc,
                     xerr=magerr1[K],
                     yerr=magerr2[K],
                     alpha=0.1)
        plt.plot(mag1[P], mag2[P] - mag1[P], 'k.', alpha=0.5)
        plt.xlabel('%s %s (mag)' % (name1, band))
        plt.ylabel('%s %s - %s %s (mag)' % (name2, band, name1, band))
        plt.axhline(0., color='k', alpha=1.)
        plt.axis([24, 16, -1, 1])
        plt.title(tt)
        ps.savefig()

        magbins = np.arange(16, 24.001, 0.5)

        plt.clf()
        plt.plot(mag1[K],
                 (mag2[K] - mag1[K]) / np.hypot(magerr1[K], magerr2[K]),
                 '.',
                 color=cc,
                 alpha=0.1)
        plt.plot(mag1[P],
                 (mag2[P] - mag1[P]) / np.hypot(magerr1[P], magerr2[P]),
                 'k.',
                 alpha=0.5)

        plt.xlabel('%s %s (mag)' % (name1, band))
        plt.ylabel('(%s %s - %s %s) / errors (sigma)' %
                   (name2, band, name1, band))
        plt.axhline(0., color='k', alpha=1.)
        plt.axis([24, 16, -10, 10])
        plt.title(tt)
        ps.savefig()

        y = (mag2 - mag1) / np.hypot(magerr1, magerr2)

        plt.clf()
        plt.plot(meanmag[P], y[P], 'k.', alpha=0.1)

        midmag = []
        vals = np.zeros((len(magbins) - 1, 5))
        median_err1 = []

        iqd_gauss = scipy.stats.norm.ppf(0.75) - scipy.stats.norm.ppf(0.25)

        # FIXME -- should we do some stats after taking off the mean difference?

        for bini, (mlo, mhi) in enumerate(zip(magbins, magbins[1:])):
            I = P[(meanmag[P] >= mlo) * (meanmag[P] < mhi)]
            midmag.append((mlo + mhi) / 2.)
            median_err1.append(np.median(magerr1[I]))
            if len(I) == 0:
                continue
            # median and +- 1 sigma quantiles
            ybin = y[I]
            vals[bini, 0] = np.percentile(ybin, 16)
            vals[bini, 1] = np.median(ybin)
            vals[bini, 2] = np.percentile(ybin, 84)
            # +- 2 sigma quantiles
            vals[bini, 3] = np.percentile(ybin, 2.3)
            vals[bini, 4] = np.percentile(ybin, 97.7)

            iqd = np.percentile(ybin, 75) - np.percentile(ybin, 25)

            print('Mag bin', midmag[-1], ': IQD is factor', iqd / iqd_gauss,
                  'vs expected for Gaussian;', len(ybin), 'points')

            # if iqd > iqd_gauss:
            #     # What error adding in quadrature would you need to make the IQD match?
            #     err = median_err1[-1]
            #     target_err = err * (iqd / iqd_gauss)
            #     sys_err = np.sqrt(target_err**2 - err**2)
            #     print('--> add systematic error', sys_err)

        # ~ Johan's cuts
        mlo = 21.
        mhi = dict(g=24., r=23.5, z=22.5)[band]
        I = P[(meanmag[P] >= mlo) * (meanmag[P] < mhi)]
        ybin = y[I]
        iqd = np.percentile(ybin, 75) - np.percentile(ybin, 25)
        print('Mag bin', mlo, mhi, 'band', band,
              ': IQD is factor', iqd / iqd_gauss, 'vs expected for Gaussian;',
              len(ybin), 'points')
        if iqd > iqd_gauss:
            # What error adding in quadrature would you need to make
            # the IQD match?
            err = np.median(np.hypot(magerr1[I], magerr2[I]))
            print('Median error (hypot):', err)
            target_err = err * (iqd / iqd_gauss)
            print('Target:', target_err)
            sys_err = np.sqrt((target_err**2 - err**2) / 2.)
            print('--> add systematic error', sys_err)

            # check...
            err_sys = np.hypot(np.hypot(magerr1, sys_err),
                               np.hypot(magerr2, sys_err))
            ysys = (mag2 - mag1) / err_sys
            ysys = ysys[I]
            print('Resulting median error:', np.median(err_sys[I]))
            iqd_sys = np.percentile(ysys, 75) - np.percentile(ysys, 25)
            print('--> IQD', iqd_sys / iqd_gauss, 'vs Gaussian')
            # Hmmm, this doesn't work... totally overshoots.
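            # In short: matching the observed IQD would require a total
            # per-pair error of err * (iqd / iqd_gauss); the factor of 2
            # inside the sqrt splits the extra variance evenly between the
            # two catalogs before it is added in quadrature to each.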

        plt.errorbar(midmag,
                     vals[:, 1],
                     fmt='o',
                     color='b',
                     yerr=(vals[:, 1] - vals[:, 0], vals[:, 2] - vals[:, 1]),
                     capthick=3,
                     zorder=20)
        plt.errorbar(midmag,
                     vals[:, 1],
                     fmt='o',
                     color='b',
                     yerr=(vals[:, 1] - vals[:, 3], vals[:, 4] - vals[:, 1]),
                     capthick=2,
                     zorder=20)
        plt.axhline(1., color='b', alpha=0.2)
        plt.axhline(-1., color='b', alpha=0.2)
        plt.axhline(2., color='b', alpha=0.2)
        plt.axhline(-2., color='b', alpha=0.2)

        for mag, err, y in zip(midmag, median_err1, vals[:, 3]):
            if not np.isfinite(err):
                continue
            if y < -6:
                continue
            plt.text(mag,
                     y - 0.1,
                     '%.3f' % err,
                     va='top',
                     ha='center',
                     color='k',
                     fontsize=10)

        plt.xlabel('(%s + %s)/2 %s (mag), PSFs' % (name1, name2, band))
        plt.ylabel('(%s %s - %s %s) / errors (sigma)' %
                   (name2, band, name1, band))
        plt.axhline(0., color='k', alpha=1.)

        plt.axvline(21, color='k', alpha=0.3)
        plt.axvline(dict(g=24, r=23.5, z=22.5)[band], color='k', alpha=0.3)

        plt.axis([24.1, 16, -6, 6])
        plt.title(tt)
        ps.savefig()

        #magbins = np.append([16, 18], np.arange(20, 24.001, 0.5))
        if band == 'g':
            magbins = [20, 24]
        elif band == 'r':
            magbins = [20, 23.5]
        elif band == 'z':
            magbins = [20, 22.5]

        slo, shi = -5, 5
        plt.clf()
        ha = dict(bins=25, range=(slo, shi), histtype='step', density=True)
        y = (mag2 - mag1) / np.hypot(magerr1, magerr2)
        midmag = []
        nn = []
        rgbs = []
        lt, lp = [], []
        for bini, (mlo, mhi) in enumerate(zip(magbins, magbins[1:])):
            I = P[(mag1[P] >= mlo) * (mag1[P] < mhi)]
            if len(I) == 0:
                continue
            ybin = y[I]
            rgb = [0., 0., 0.]
            rgb[0] = float(bini) / (len(magbins) - 1)
            rgb[2] = 1. - rgb[0]
            n, b, p = plt.hist(ybin, color=rgb, **ha)
            lt.append('mag %g to %g' % (mlo, mhi))
            lp.append(p[0])
            midmag.append((mlo + mhi) / 2.)
            nn.append(n)
            rgbs.append(rgb)

        bins = []
        gaussint = []
        for blo, bhi in zip(b, b[1:]):
            #midbin.append((blo+bhi)/2.)
            #gaussint.append(scipy.stats.norm.cdf(bhi) -
            #                scipy.stats.norm.cdf(blo))
            c = scipy.stats.norm.cdf(bhi) - scipy.stats.norm.cdf(blo)
            c /= (bhi - blo)
            bins.extend([blo, bhi])
            gaussint.extend([c, c])
        plt.plot(bins, gaussint, 'k-', lw=2, alpha=0.5)

        plt.legend(lp, lt)
        plt.title(tt)
        plt.xlim(slo, shi)
        ps.savefig()

        bincenters = b[:-1] + (b[1] - b[0]) / 2.
        plt.clf()
        lp = []
        for n, rgb, mlo, mhi in zip(nn, rgbs, magbins, magbins[1:]):
            p = plt.plot(bincenters, n, '-', color=rgb)
            lp.append(p[0])
        plt.plot(bincenters, gaussint[::2], 'k-', alpha=0.5, lw=2)
        plt.legend(lp, lt)
        plt.title(tt)
        plt.xlim(slo, shi)
        ps.savefig()
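
For reference, the flux/magnitude conversions used throughout (NanoMaggies.nanomaggiesToMag, fluxErrorsToMagErrors) follow the standard nanomaggy zeropoint of 22.5 mag AB; a numpy-only sketch of that relation:

import numpy as np

def nmgy_to_mag(flux_nmgy):
    # AB magnitude for a flux in nanomaggies (zeropoint 22.5 mag).
    return 22.5 - 2.5 * np.log10(flux_nmgy)

def mag_to_nmgy(mag):
    return 10.0 ** ((22.5 - mag) / 2.5)

print(nmgy_to_mag(1.0))    # 22.5
print(mag_to_nmgy(20.0))   # 10.0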
Example #44
def main():

    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_102549_oow_r_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_100244_oow_g_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_100433_oow_g_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_102401_oow_g_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_100622_oow_r_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_100802_oow_r_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_102727_oow_r_v1.fits.fz')
    # one_file('/global/cfs/cdirs/cosmo/staging/decam/CP/V4.8.2a/CP20190719/c4d_190720_102212_oow_g_v1.fits.fz')
    # return

    if True:
        #dirs = glob(dirprefix + '90prime/CP/V2.3/CP*')
        dirs = glob(dirprefix + 'mosaic/CP/V4.3/CP*')
        #dirs = glob(dirprefix + 'mosaic/CP/V4.3/CP2017*')
        #dirs = (glob(dirprefix + 'mosaic/CP/V4.3/CP2015*') +
        #        glob(dirprefix + 'mosaic/CP/V4.3/CP2016*'))
        #dirs = glob(dirprefix + 'decam/CP/V4.8.2a/CP*')
        dirs.sort()
        dirs = list(reversed(dirs))
    
        keepdirs = []
        for dirnm in dirs:
            outfn = 'oow-stats-' + '-'.join(dirnm.split('/')[-4:]) + '.fits'
            if os.path.exists(outfn):
                print('skipping', outfn)
                continue
            keepdirs.append(dirnm)
        dirs = keepdirs
    
        print('Directories to run:')
        for dirnm in dirs:
            print('  ', dirnm)

    # import argparse
    # parser = argparse.ArgumentParser()
    # parser.add_argument('dirs', nargs='+',
    #                     help='Directories to process')
    # args = parser.parse_args()
    # 
    # dirs = args.dirs
    # print('Dirs:', dirs)
    
    mp = multiproc(32)
    #mp = multiproc(1)
    
    for dirnm in dirs:
        print('Dir', dirnm)
        outfn = 'oow-stats-' + '-'.join(dirnm.split('/')[-4:]) + '.fits'
        print('looking for', outfn)
        if os.path.exists(outfn):
            print('skipping', outfn)
            continue
        pat =  os.path.join(dirnm, '*_oow_*.fits.fz')
        print('Pattern', pat)
        fns = glob(pat)
        fns.sort()
        print(len(fns), 'oow files')

        TT = mp.map(one_file, fns)
        T = merge_tables(TT)
        T.writeto(outfn)
        print('Wrote', outfn)
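
For reference, a small sketch of the per-directory output naming used above; the path below is hypothetical (the actual dirprefix is not shown in this excerpt):

dirnm = '/some/prefix/mosaic/CP/V4.3/CP20170202'
outfn = 'oow-stats-' + '-'.join(dirnm.split('/')[-4:]) + '.fits'
print(outfn)   # oow-stats-mosaic-CP-V4.3-CP20170202.fits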
Example #46
def compare_mags(TT, name, ps):
    for i,T in enumerate(TT):
        T.set('exp', np.zeros(len(T), np.uint8)+i)

    plt.clf()
    ap = 5  # aperture index into T.apflux used for the comparison
    for i,T in enumerate(TT):
        cc = 'rgb'[i]
        plt.plot(T.flux, T.apflux[:, ap] / T.flux,
                 '.', color=cc, alpha=0.5)

        ff, frac = [],[]
        mags = np.arange(14, 24)
        for mlo,mhi in zip(mags, mags[1:]):
            flo = NanoMaggies.magToNanomaggies(mhi)
            fhi = NanoMaggies.magToNanomaggies(mlo)
            I = np.flatnonzero((T.flux > flo) * (T.flux <= fhi))
            ff.append(np.sqrt(flo * fhi))
            frac.append(np.median(T.apflux[I,ap] / T.flux[I]))
        plt.plot(ff, frac, 'o-', color=cc)
        
    plt.xscale('symlog')
    plt.xlim(1., 1e3)
    plt.ylim(0.9, 1.1)
    plt.xlabel('Forced-phot flux')
    plt.ylabel('Aperture / Forced-phot flux')
    plt.axhline(1, color='k', alpha=0.1)
    plt.title('%s region: Aperture %i fluxes' % (name, ap))
    ps.savefig()

    T = merge_tables(TT)

    T.bobjid = T.brickid.astype(int) * 10000 + T.objid
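    # bobjid packs (brickid, objid) into one integer key so that the same
    # source can be grouped across the three exposures; this assumes
    # objid < 10000 within a brick.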
    bomap = {}
    for i,bo in enumerate(T.bobjid):
        try:
            bomap[bo].append(i)
        except KeyError:
            bomap[bo] = [i]

    II = []
    for bo,ii in bomap.items():
        if len(ii) != 3:
            continue
        II.append(ii)
    II = np.array(II)
    print('II', II.shape)

    exps = T.exp[II]
    print('exposures:', exps)
    assert(np.all(T.exp[II[:,0]] == 0))
    assert(np.all(T.exp[II[:,1]] == 1))
    assert(np.all(T.exp[II[:,2]] == 2))

    fluxes = T.flux[II]
    print('fluxes', fluxes.shape)
    meanflux = np.mean(fluxes, axis=1)
    print('meanfluxes', meanflux.shape)

    plt.clf()
    for i in range(3):
        plt.plot(meanflux, fluxes[:,i] / meanflux, '.',
                 color='rgb'[i], alpha=0.5)
    #plt.yscale('symlog')
    plt.xscale('symlog')
    plt.xlabel('Mean flux (nanomaggies)')
    plt.ylabel('Forced-phot flux / Mean')
    #plt.ylim(0, 2)
    plt.ylim(0.9, 1.1)
    plt.xlim(0, 1e3)
    plt.axhline(1, color='k', alpha=0.1)
    plt.title('%s region: Forced-phot fluxes' % name)
    ps.savefig()

    for ap in [4,5,6]:
        apfluxes = T.apflux[:, ap][II]
        print('ap fluxes', apfluxes.shape)

        plt.clf()
        for i in range(3):
            plt.plot(meanflux, apfluxes[:,i] / meanflux, '.',
                     color='rgb'[i], alpha=0.5)
        plt.xscale('symlog')
        plt.xlabel('Mean flux (nanomaggies)')
        plt.ylabel('Aperture(%i) flux / Mean' % ap)
        plt.ylim(0.9, 1.1)
        plt.xlim(0, 1e3)
        plt.axhline(1, color='k', alpha=0.1)
        plt.title('%s region: Aperture %i fluxes' % (name, ap))
        ps.savefig()

        plt.clf()
        for i in range(3):
            plt.plot(fluxes[:,i], apfluxes[:,i] / fluxes[:,i], '.',
                     color='rgb'[i], alpha=0.5)
        plt.xscale('symlog')
        plt.xlim(0, 1e3)
        plt.ylim(0.9, 1.1)
        plt.xlabel('Forced-phot flux')
        plt.ylabel('Aperture / Forced-phot flux')
        plt.axhline(1, color='k', alpha=0.1)
        plt.title('%s region: Aperture %i fluxes' % (name, ap))
        ps.savefig()
Example #47
    # print('Reading WCS from', flt2)
    # wcs2 = Sip(flt2, 0)
    # wcs2.ensure_inverse_polynomials()

    print('T1 X,Y ranges', T1.x.min(), T1.x.max(), T1.y.min(), T1.y.max())
    print('T2 X,Y ranges', T2.x.min(), T2.x.max(), T2.y.min(), T2.y.max())

    # ~ 1e-6, 0.0006
    # ok,x,y = wcs2.radec2pixelxy(T2.ra, T2.dec)
    # print('Scatter wcs x vs catalog x:', np.mean(x - T2.x), np.std(x - T2.x))
    # print('Scatter wcs y vs catalog y:', np.mean(y - T2.y), np.std(y - T2.y))

    ok, x, y = wcs1.radec2pixelxy(T2.ra, T2.dec)
    print('Converted X,Y ranges:', x.min(), x.max(), y.min(), y.max())
    T2.x = x
    T2.y = y

    TT = merge_tables([T1, T2])
    TT.writeto(gstout)
    print('Wrote', gstout)

    hdr = pyfits.open(flt1)[0].header
    hdr['IMAGEW'] = 4096
    hdr['IMAGEH'] = 4096
    pyfits.writeto(fltout, None, header=hdr, clobber=True)
    print('Wrote', fltout)

    #cmd = 'cp "%s" "%s"' % (flt1, fltout)
    #print(cmd)
    #os.system(cmd)
Example #48
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--build-sample', action='store_true', help='Build the sample.')
    parser.add_argument('--jpg-cutouts', action='store_true', help='Get jpg cutouts from the viewer.')
    parser.add_argument('--ccd-cutouts', action='store_true', help='Get CCD cutouts of each galaxy.')
    parser.add_argument('--runbrick', action='store_true', help='Run the pipeline.')
    parser.add_argument('--build-webpage', action='store_true', help='(Re)build the web content.')
    args = parser.parse_args()

    # Top-level directory
    key = 'LEGACY_SURVEY_LARGE_GALAXIES'
    if key not in os.environ:
        print('Required ${} environment variable not set'.format(key))
        return 0
    largedir = os.getenv(key)
    samplefile = os.path.join(largedir, 'large-galaxies-sample.fits')

    # --------------------------------------------------
    # Build the sample of large galaxies based on the available imaging.
    if args.build_sample:

        # Read the parent catalog.
        cat = read_rc3()
        
        # Create a simple WCS object for each object and find all the CCDs
        # touching that WCS footprint.
        survey = LegacySurveyData(version='dr2') # hack!
        allccds = survey.get_ccds()
        keep = np.concatenate((survey.apply_blacklist(allccds),
                               survey.photometric_ccds(allccds)))
        allccds.cut(keep)

        ccdlist = []
        outcat = []
        for gal in cat:
            galwcs = _simplewcs(gal)

            ccds1 = allccds[ccds_touching_wcs(galwcs, allccds)]
            ccds1 = ccds1[_uniqccds(ccds1)]
            
            if len(ccds1) > 0 and 'g' in ccds1.filter and 'r' in ccds1.filter and 'z' in ccds1.filter:
                print('Found {} CCDs for {}, D(25)={:.4f}'.format(
                    len(ccds1), gal['GALAXY'], gal['RADIUS']))
                
                ccdsfile = os.path.join(largedir, 'ccds', '{}-ccds.fits'.format(gal['GALAXY'].strip().lower()))
                print('  Writing {}'.format(ccdsfile))
                if os.path.isfile(ccdsfile):
                    os.remove(ccdsfile)
                ccds1.writeto(ccdsfile)
                
                ccdlist.append(ccds1)
                if len(outcat) == 0:
                    outcat = gal
                else:
                    outcat = vstack((outcat, gal))
                #if gal['GALAXY'] == 'MCG5-19-36':
                #    pdb.set_trace()

        # Write out the final catalog.
        samplefile = os.path.join(largedir, 'large-galaxies-sample.fits')
        if os.path.isfile(samplefile):
            os.remove(samplefile)
        print('Writing {}'.format(samplefile))
        outcat.write(samplefile)
        print(outcat)

        # Do we need to transfer any of the data to nyx?
        _getfiles(merge_tables(ccdlist))

    # --------------------------------------------------
    # Get data, model, and residual cutouts from the legacysurvey viewer.  Also
    # get thumbnails that are lower resolution.
    if args.jpg_cutouts:
        thumbsize = 100
        sample = fits.getdata(samplefile, 1)
        for gal in sample:
            size = np.ceil(10*gal['RADIUS']/PIXSCALE)
            thumbpixscale = PIXSCALE*size/thumbsize

            #imageurl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}'.format(gal['RA'], gal['DEC'])+\
            #  '&pixscale={:.3f}&size={:g}'.format(PIXSCALE, size)
            #imagejpg = os.path.join(largedir, 'cutouts', gal['GALAXY'].strip().lower()+'-image.jpg')
            #if os.path.isfile(imagejpg):
            #    os.remove(imagejpg)
            #os.system('wget --continue -O {:s} "{:s}"' .format(imagejpg, imageurl))

            thumburl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}'.format(gal['RA'], gal['DEC'])+\
              '&pixscale={:.3f}&size={:g}'.format(thumbpixscale, thumbsize)
            thumbjpg = os.path.join(largedir, 'cutouts', gal['GALAXY'].strip().lower()+'-image-thumb.jpg')
            if os.path.isfile(thumbjpg):
                os.remove(thumbjpg)
            os.system('wget --continue -O {:s} "{:s}"' .format(thumbjpg, thumburl))

    # --------------------------------------------------
    # (Re)build the webpage.
    if args.build_webpage:

        # index.html
        html = open(os.path.join(largedir, 'index.html'), 'w')
        html.write('<html><body>\n')
        html.write('<h1>Sample of Large Galaxies</h1>\n')
        html.write('<table border="2" width="30%">\n')
        html.write('<tbody>\n')
        sample = fits.getdata(samplefile, 1)
        for gal in sample:
            # Add coordinates and sizes here.
            galaxy = gal['GALAXY'].strip().lower()
            html.write('<tr>\n')
            html.write('<td><a href="html/{}.html">{}</a></td>\n'.format(galaxy, galaxy.upper()))
            html.write('<td><a href="http://legacysurvey.org/viewer/?ra={:.6f}&dec={:.6f}" target="_blank"><img src=cutouts/{}-image-thumb.jpg alt={} /></a></td>\n'.format(gal['RA'], gal['DEC'], galaxy, galaxy.upper()))
#           html.write('<td><a href="html/{}.html"><img src=cutouts/{}-image-thumb.jpg alt={} /></a></td>\n'.format(galaxy, galaxy, galaxy.upper()))
            html.write('</tr>\n')
        html.write('</tbody>\n')
        html.write('</table>\n')
        html.write('</body></html>\n')
        html.close()

        sys.exit(1)
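        # NB: the sys.exit(1) above means the per-galaxy pages below are not
        # currently generated.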
    
        # individual galaxy pages
        for gal in sample[:3]:
            galaxy = gal['GALAXY'].strip().lower()
            html = open(os.path.join(largedir, 'html/{}.html'.format(galaxy)), 'w')
            html.write('<html><body>\n')
            html.write('<a href=../cutouts/{}.jpg><img src=../cutouts/{}-image.jpg alt={} /></a>\n'.format(galaxy, galaxy, galaxy.upper()))
            html.write('</body></html>\n')
            html.close()

    # --------------------------------------------------
    # Get cutouts of all the CCDs for each galaxy.
    if args.ccd_cutouts:
        sample = fits.getdata(samplefile, 1)

        for gal in sample[1:2]:
            galaxy = gal['GALAXY'].strip().lower()
            ccdsfile = os.path.join(largedir, 'ccds', '{}-ccds.fits'.format(galaxy))
            ccds = fits.getdata(ccdsfile)

            pdb.set_trace()

    # --------------------------------------------------
    # Run the pipeline.
    if args.runbrick:
        sample = fits.getdata(samplefile, 1)

        for gal in sample[1:2]:
            galaxy = gal['GALAXY'].strip().lower()
            diam = 10*np.ceil(gal['RADIUS']/PIXSCALE).astype('int16') # [pixels]

            # Note: zoom is relative to the center of an imaginary brick with
            # dimensions (0, 3600, 0, 3600).
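            # For example, diam = 400 pixels gives zoom = (1600, 2000, 1600, 2000),
            # i.e. a 400x400-pixel box centered on that imaginary brick.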
            survey = LegacySurveyData(version='dr2', output_dir=largedir)
            run_brick(None, survey, radec=(gal['RA'], gal['DEC']), blobxy=zip([diam/2], [diam/2]), 
                      threads=1, zoom=(1800-diam/2, 1800+diam/2, 1800-diam/2, 1800+diam/2),
                      wise=False, forceAll=True, writePickles=False, do_calibs=False,
                      write_metrics=False, pixPsf=True, splinesky=True, 
                      early_coadds=True, stages=['writecat'], ceres=False)

            pdb.set_trace()
Example #49
0
def main():
    survey = LegacySurveyData()
    ccds = survey.get_ccds()
    print(len(ccds), 'CCDs')

    expnums = np.unique(ccds.expnum)
    print(len(expnums), 'unique exposures')

    for expnum in expnums:

        expnumstr = '%08i' % expnum
        skyoutfn = os.path.join('splinesky', expnumstr[:5], 'decam-%s.fits' % expnumstr)
        psfoutfn = os.path.join('psfex', expnumstr[:5], 'decam-%s.fits' % expnumstr)

        if os.path.exists(skyoutfn) and os.path.exists(psfoutfn):
            print('Exposure', expnum, 'is done already')
            continue

        C = ccds[ccds.expnum == expnum]
        print(len(C), 'CCDs in expnum', expnum)

        psfex = []
        psfhdrvals = []

        splinesky = []
        skyhdrvals = []

        for ccd in C:
            im = survey.get_image_object(ccd)

            fn = im.splineskyfn
            if os.path.exists(fn):
                T = fits_table(fn)
                splinesky.append(T)
                # print(fn)
                # T.about()
                hdr = fitsio.read_header(fn)
                skyhdrvals.append([hdr[k] for k in [
                            'SKY', 'LEGPIPEV', 'PLVER']] + [expnum, ccd.ccdname])
            else:
                print('File not found:', fn)

            fn = im.psffn
            if os.path.exists(fn):
                T = fits_table(fn)
                hdr = fitsio.read_header(fn, ext=1)

                keys = ['LOADED', 'ACCEPTED', 'CHI2', 'POLNAXIS', 
                        'POLNGRP', 'PSF_FWHM', 'PSF_SAMP', 'PSFNAXIS',
                        'PSFAXIS1', 'PSFAXIS2', 'PSFAXIS3',]

                if hdr['POLNAXIS'] == 0:
                    # No polynomials.  Fake it.
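                    # (single-element placeholder values, so every per-CCD PsfEx
                    # table carries the same POL* columns and can be merged below
                    # with merge_tables.)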
                    T.polgrp1 = np.array([0])
                    T.polgrp2 = np.array([0])
                    T.polname1 = np.array(['fake'])
                    T.polname2 = np.array(['fake'])
                    T.polzero1 = np.array([0])
                    T.polzero2 = np.array([0])
                    T.polscal1 = np.array([1])
                    T.polscal2 = np.array([1])
                    T.poldeg1 = np.array([0])
                    T.poldeg2 = np.array([0])
                else:
                    keys.extend([
                            'POLGRP1', 'POLNAME1', 'POLZERO1', 'POLSCAL1',
                            'POLGRP2', 'POLNAME2', 'POLZERO2', 'POLSCAL2',
                            'POLDEG1'])

                for k in keys:
                    T.set(k.lower(), np.array([hdr[k]]))
                psfex.append(T)
                #print(fn)
                #T.about()
    
                hdr = fitsio.read_header(fn)
                psfhdrvals.append([hdr.get(k,'') for k in [
                    'LEGPIPEV', 'PLVER']] + [expnum, ccd.ccdname])
            else:
                print('File not found:', fn)

        if len(psfex):
            padded = pad_arrays([p.psf_mask[0] for p in psfex])
            cols = psfex[0].columns()
            cols.remove('psf_mask')
            T = merge_tables(psfex, columns=cols)
            T.psf_mask = np.concatenate([[p] for p in padded])
            T.legpipev = np.array([h[0] for h in psfhdrvals])
            T.plver    = np.array([h[1] for h in psfhdrvals])
            T.expnum   = np.array([h[2] for h in psfhdrvals])
            T.ccdname  = np.array([h[3] for h in psfhdrvals])
            fn = psfoutfn
            trymakedirs(fn, dir=True)
            T.writeto(fn)
            print('Wrote', fn)

        if len(splinesky):
            T = fits_table()
            T.gridw = np.array([t.gridvals[0].shape[1] for t in splinesky])
            T.gridh = np.array([t.gridvals[0].shape[0] for t in splinesky])

            padded = pad_arrays([t.gridvals[0] for t in splinesky])
            T.gridvals = np.concatenate([[p] for p in padded])
            padded = pad_arrays([t.xgrid[0] for t in splinesky])
            T.xgrid = np.concatenate([[p] for p in padded])
            padded = pad_arrays([t.ygrid[0] for t in splinesky])
            T.ygrid = np.concatenate([[p] for p in padded])
    
            cols = splinesky[0].columns()
            print('Columns:', cols)
            for c in ['gridvals', 'xgrid', 'ygrid']:
                cols.remove(c)

            T.add_columns_from(merge_tables(splinesky, columns=cols))
            T.skyclass = np.array([h[0] for h in skyhdrvals])
            T.legpipev = np.array([h[1] for h in skyhdrvals])
            T.plver    = np.array([h[2] for h in skyhdrvals])
            T.expnum   = np.array([h[3] for h in skyhdrvals])
            T.ccdname  = np.array([h[4] for h in skyhdrvals])
            fn = skyoutfn
            trymakedirs(fn, dir=True)
            T.writeto(fn)
            print('Wrote', fn)
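
Example #49 relies on a helper, pad_arrays, to bring the per-CCD PSF and spline-sky grids (which can differ in size from CCD to CCD) to a common shape before stacking them into single table columns. A minimal sketch of what such a helper might look like (an assumption; the project's own utility may differ):

import numpy as np

def pad_arrays(arrs):
    # Zero-pad a list of arrays (all of the same ndim) to the largest extent
    # along each axis, so they can later be stacked into one array / column.
    ndim = arrs[0].ndim
    shape = tuple(max(a.shape[i] for a in arrs) for i in range(ndim))
    padded = []
    for a in arrs:
        p = np.zeros(shape, dtype=a.dtype)
        p[tuple(slice(0, s) for s in a.shape)] = a
        padded.append(p)
    return padded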
Example #50
0
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--out', dest='outfn', help='Output filename',
                      default='TMP/nexp.fits')
    parser.add_argument('--merge', action='store_true', help='Merge sub-tables')
    parser.add_argument('--plot', action='store_true', help='Plot results')
    parser.add_argument('files', metavar='nexp-file.fits.gz', nargs='+',
                        help='List of nexp files to process')

    opt = parser.parse_args()

    fns = opt.files

    if opt.merge:
        from astrometry.util.fits import merge_tables
        TT = []
        for fn in fns:
            T = fits_table(fn)
            print(fn, '->', len(T))
            TT.append(T)
        T = merge_tables(TT)
        T.writeto(opt.outfn)
        print('Wrote', opt.outfn)

    if opt.plot:
        T = fits_table(opt.files[0])
        import pylab as plt
        import matplotlib
        
        ax = [360, 0, -21, 36]

        def radec_plot():
            plt.axis(ax)
            plt.xlabel('RA (deg)')
            plt.xticks(np.arange(0, 361, 45))
            plt.ylabel('Dec (deg)')

            gl = np.arange(361)
            gb = np.zeros_like(gl)
            from astrometry.util.starutil_numpy import lbtoradec
            rr,dd = lbtoradec(gl, gb)
            plt.plot(rr, dd, 'k-', alpha=0.5, lw=1)
            rr,dd = lbtoradec(gl, gb+10)
            plt.plot(rr, dd, 'k-', alpha=0.25, lw=1)
            rr,dd = lbtoradec(gl, gb-10)
            plt.plot(rr, dd, 'k-', alpha=0.25, lw=1)
            
        plt.figure(figsize=(8,5))
        plt.subplots_adjust(left=0.1, right=0.98, top=0.93)
        
        # Map of the tile centers we want to observe...
        O = fits_table('obstatus/decam-tiles_obstatus.fits')
        O.cut(O.in_desi == 1)
        rr,dd = np.meshgrid(np.linspace(ax[1],ax[0], 700),
                            np.linspace(ax[2],ax[3], 200))
        from astrometry.libkd.spherematch import match_radec
        I,J,d = match_radec(O.ra, O.dec, rr.ravel(), dd.ravel(), 1.)
        desimap = np.zeros(rr.shape, bool)
        desimap.flat[J] = True
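        # desimap is now a coarse (200 x 700 pixel) boolean image of the footprint:
        # a map pixel is flagged True if any in-DESI tile center lies within
        # 1 degree of it (via the match_radec call above).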

        def desi_map():
            # Show the DESI tile map in the background.
            from astrometry.util.plotutils import antigray
            plt.imshow(desimap, origin='lower', interpolation='nearest',
                       extent=[ax[1],ax[0],ax[2],ax[3]], aspect='auto',
                       cmap=antigray, vmax=8)

        for band in 'grz':
            plt.clf()
            desi_map()
            N = T.get('nexp_%s' % band)
            I = np.flatnonzero(N > 0)
            #cm = matplotlib.cm.get_cmap('jet', 6)
            #cm = matplotlib.cm.get_cmap('winter', 5)
            cm = matplotlib.cm.viridis
            cm = matplotlib.cm.get_cmap(cm, 5)
            plt.scatter(T.ra[I], T.dec[I], c=N[I], s=2,
                        edgecolors='none',
                        vmin=0.5, vmax=5.5, cmap=cm)
            radec_plot()
            cax = colorbar_axes(plt.gca(), frac=0.06)
            plt.colorbar(cax=cax, ticks=range(6))
            #plt.colorbar(ticks=range(6))
            plt.title('DECaLS DR3: Number of exposures in %s' % band)
            plt.savefig('nexp-%s.png' % band)

            plt.clf()
            desi_map()
            plt.scatter(T.ra, T.dec, c=T.get('psfsize_%s' % band), s=2,
                        edgecolors='none', vmin=0, vmax=2.)
            radec_plot()
            plt.colorbar()
            plt.title('DECaLS DR3: PSF size, band %s' % band)
            plt.savefig('psfsize-%s.png' % band)

        return 0  # NB: returns here; the per-type object-count plots below are not produced
            
        for col in ['nobjs', 'npsf', 'nsimp', 'nexp', 'ndev', 'ncomp']:
            plt.clf()
            desi_map()
            N = T.get(col)
            mx = np.percentile(N, 99.5)
            plt.scatter(T.ra, T.dec, c=N, s=2,
                        edgecolors='none', vmin=0, vmax=mx)
            radec_plot()
            plt.colorbar()
            plt.title('DECaLS DR3: Number of objects of type %s' % col[1:])
            plt.savefig('nobjs-%s.png' % col[1:])

        Ntot = T.nobjs
        for col in ['npsf', 'nsimp', 'nexp', 'ndev', 'ncomp']:
            plt.clf()
            desi_map()
            N = T.get(col) / Ntot.astype(np.float32)
            mx = np.percentile(N, 99.5)
            plt.scatter(T.ra, T.dec, c=N, s=2,
                        edgecolors='none', vmin=0, vmax=mx)
            radec_plot()
            plt.colorbar()
            plt.title('DECaLS DR3: Fraction of objects of type %s' % col[1:])
            plt.savefig('fobjs-%s.png' % col[1:])

            
        return 0

    # fnpats = opt.files
    # fns = []
    # for pat in fnpats:
    #     pfns = glob(pat)
    #     fns.extend(pfns)
    #     print('Pattern', pat, '->', len(pfns), 'files')
    #fns = glob('coadd/*/*/*-nexp*')
    #fns = glob('coadd/000/*/*-nexp*')
    #fns = glob('coadd/000/0001*/*-nexp*')
    fns.sort()
    print(len(fns), 'nexp files')
    
    brickset = set()
    bricklist = []
    gn = []
    rn = []
    zn = []
    
    gnhist = []
    rnhist = []
    znhist = []
    
    nnhist = 6
    
    gdepth = []
    rdepth = []
    zdepth = []
    
    ibricks = []
    nsrcs = []
    npsf  = []
    nsimp = []
    nexp  = []
    ndev  = []
    ncomp = []
    
    gpsfsize = []
    rpsfsize = []
    zpsfsize = []
    ebv = []
    gtrans = []
    rtrans = []
    ztrans = []
    
    bricks = fits_table('survey-bricks.fits.gz')
    
    #sfd = SFDMap()
    
    W = H = 3600
    # H=3600
    # xx,yy = np.meshgrid(np.arange(W), np.arange(H))
    unique = np.ones((H,W), bool)
    tlast = 0
    
    for fn in fns:
        print('File', fn)
        words = fn.split('/')
        dirprefix = '/'.join(words[:-4])
        print('Directory prefix:', dirprefix)
        words = words[-4:]
        brick = words[2]
        print('Brick', brick)
        if not brick in brickset:
            brickset.add(brick)
            bricklist.append(brick)
            gn.append(0)
            rn.append(0)
            zn.append(0)
    
            gnhist.append([0 for i in range(nnhist)])
            rnhist.append([0 for i in range(nnhist)])
            znhist.append([0 for i in range(nnhist)])
    
            index = -1
            ibrick = np.nonzero(bricks.brickname == brick)[0][0]
            ibricks.append(ibrick)
            tfn = os.path.join(dirprefix, 'tractor', brick[:3], 'tractor-%s.fits'%brick)
            print('Tractor filename', tfn)
            T = fits_table(tfn, columns=['brick_primary', 'type', 'decam_psfsize',
                                         'ebv', 'decam_mw_transmission'])
            T.cut(T.brick_primary)
            nsrcs.append(len(T))
            types = Counter([t.strip() for t in T.type])
            npsf.append(types['PSF'])
            nsimp.append(types['SIMP'])
            nexp.append(types['EXP'])
            ndev.append(types['DEV'])
            ncomp.append(types['COMP'])
            print('N sources', nsrcs[-1])
    
            gpsfsize.append(np.median(T.decam_psfsize[:,1]))
            rpsfsize.append(np.median(T.decam_psfsize[:,2]))
            zpsfsize.append(np.median(T.decam_psfsize[:,4]))
    
            ebv.append(np.median(T.ebv))
            gtrans.append(np.median(T.decam_mw_transmission[:,1]))
            rtrans.append(np.median(T.decam_mw_transmission[:,2]))
            ztrans.append(np.median(T.decam_mw_transmission[:,4]))
    
            br = bricks[ibrick]
    
            print('Computing unique brick pixels...')
            #wcs = Tan(fn, 0)
            #W,H = int(wcs.get_width()), int(wcs.get_height())
    
            pixscale = 0.262/3600.
            wcs = Tan(br.ra, br.dec, W/2.+0.5, H/2.+0.5,
                      -pixscale, 0., 0., pixscale,
                      float(W), float(H))
            import time
    
            t0 = time.clock()
    
            unique[:,:] = True
    
            find_unique_pixels(wcs, W, H, unique,
                               br.ra1, br.ra2, br.dec1, br.dec2)
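            # find_unique_pixels marks unique[y, x] = True only where the pixel
            # falls inside this brick's own RA/Dec bounds (ra1..ra2, dec1..dec2),
            # so overlap regions shared with neighboring bricks are counted once;
            # the commented-out loop below is an equivalent, slower Python version.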
    
            # for i in range(W/2):
            #     allin = True
            #     lo,hi = i, W-i-1
            #     # one slice per side
            #     side = slice(lo,hi+1)
            #     top = (lo, side)
            #     bot = (hi, side)
            #     left  = (side, lo)
            #     right = (side, hi)
            #     for slc in [top, bot, left, right]:
            #         #print('xx,yy', xx[slc], yy[slc])
            #         rr,dd = wcs.pixelxy2radec(xx[slc]+1, yy[slc]+1)
            #         U = (rr >= br.ra1 ) * (rr < br.ra2 ) * (dd >= br.dec1) * (dd < br.dec2)
            #         #print('Pixel', i, ':', np.sum(U), 'of', len(U), 'pixels are unique')
            #         allin *= np.all(U)
            #         unique[slc] = U
            #     if allin:
            #         print('Scanned to pixel', i)
            #         break
    
            t1 = time.clock()
            U = np.flatnonzero(unique)
            t2 = time.clock()
            print(len(U), 'of', W*H, 'pixels are unique to this brick')
    
            # #t3 = time.clock()
            #rr,dd = wcs.pixelxy2radec(xx+1, yy+1)
            # #t4 = time.clock()
            # #u = (rr >= br.ra1 ) * (rr < br.ra2 ) * (dd >= br.dec1) * (dd < br.dec2)
            # #t5 = time.clock()
            # #U2 = np.flatnonzero(u)
            #U2 = np.flatnonzero((rr >= br.ra1 ) * (rr < br.ra2 ) *
            #                    (dd >= br.dec1) * (dd < br.dec2))
            #assert(np.all(U == U2))
            #assert(len(U) == len(U2))
            # #t6 = time.clock()
            # print(len(U2), 'of', W*H, 'pixels are unique to this brick')
            # 
    
            #print(t0-tlast, 'other time')
            #tlast = time.clock() #t2
            #print('t1:', t1-t0, 't2', t2-t1)
    
            # #print('t4:', t4-t3, 't5', t5-t4, 't6', t6-t5)
            # 
    
        else:
            index = bricklist.index(brick)
            assert(index == len(bricklist)-1)
    
        index = bricklist.index(brick)
        assert(index == len(bricklist)-1)
    
        filepart = words[-1]
        filepart = filepart.replace('.fits.gz', '')
        print('File:', filepart)
        band = filepart[-1]
        assert(band in 'grz')
    
        nlist,nhist = dict(g=(gn,gnhist), r=(rn,rnhist), z=(zn,znhist))[band]
    
        upix = fitsio.read(fn).flat[U]
        med = np.median(upix)
        print('Band', band, ': Median', med)
        nlist[index] = med
    
        hist = nhist[index]
        for i in range(nnhist):
            if i < nnhist-1:
                hist[i] = np.sum(upix == i)
            else:
                hist[i] = np.sum(upix >= i)
        assert(sum(hist) == len(upix))
        print('Number of exposures histogram:', hist)
    
    ibricks = np.array(ibricks)
    
    print('Maximum number of sources:', max(nsrcs))
    
    T = fits_table()
    T.brickname = np.array(bricklist)
    T.ra  = bricks.ra [ibricks]
    T.dec = bricks.dec[ibricks]
    T.nexp_g = np.array(gn).astype(np.int16)
    T.nexp_r = np.array(rn).astype(np.int16)
    T.nexp_z = np.array(zn).astype(np.int16)
    T.nexphist_g = np.array(gnhist).astype(np.int32)
    T.nexphist_r = np.array(rnhist).astype(np.int32)
    T.nexphist_z = np.array(znhist).astype(np.int32)
    T.nobjs  = np.array(nsrcs).astype(np.int16)
    T.npsf   = np.array(npsf ).astype(np.int16)
    T.nsimp  = np.array(nsimp).astype(np.int16)
    T.nexp   = np.array(nexp ).astype(np.int16)
    T.ndev   = np.array(ndev ).astype(np.int16)
    T.ncomp  = np.array(ncomp).astype(np.int16)
    T.psfsize_g = np.array(gpsfsize).astype(np.float32)
    T.psfsize_r = np.array(rpsfsize).astype(np.float32)
    T.psfsize_z = np.array(zpsfsize).astype(np.float32)
    T.ebv = np.array(ebv).astype(np.float32)
    T.trans_g = np.array(gtrans).astype(np.float32)
    T.trans_r = np.array(rtrans).astype(np.float32)
    T.trans_z = np.array(ztrans).astype(np.float32)
    T.writeto(opt.outfn)
    # --- Fragment from a separate per-brick depth-summary loop (its header is
    # not included in this excerpt): for each per-brick table `t`, the
    # histogram bin edges are checked against the reference table `T`, the
    # per-band counts are accumulated, and the results are written out.
        print('t depthhi', t.depthhi)

        assert(np.all(t.depthlo == T.depthlo))
        assert(np.all(t.depthhi == T.depthhi))
        cols = t.get_columns()
        t.brickname = np.array([brickname_from_filename(fn)] * len(t))
        for band in 'grz':
            col = 'counts_ptsrc_%s' % band
            if not col in cols:
                continue
            C = T.get(col)
            C += t.get(col)
            col = 'counts_gal_%s' % band
            C = T.get(col)
            C += t.get(col)
        TT.append(t)

    T.delete_column('brickname')
    T.writeto(summaryfn)
    print('Wrote', summaryfn)

    T = merge_tables(TT, columns='fillzero')
    T.writeto(outfn)
    print('Wrote', outfn)

Example #52
0
def get_catalog_in_wcs(chipwcs,
                       catsurvey_north,
                       catsurvey_south=None,
                       resolve_dec=None,
                       margin=20):
    TT = []
    surveys = [(catsurvey_north, True)]
    if catsurvey_south is not None:
        surveys.append((catsurvey_south, False))

    for catsurvey, north in surveys:
        bricks = bricks_touching_wcs(chipwcs, survey=catsurvey)

        if resolve_dec is not None:
            from astrometry.util.starutil_numpy import radectolb
            bricks.gal_l, bricks.gal_b = radectolb(bricks.ra, bricks.dec)

        for b in bricks:
            # Skip bricks that are entirely on the wrong side of the resolve line (NGC only)
            if resolve_dec is not None and b.gal_b > 0:
                if north and b.dec2 <= resolve_dec:
                    continue
                if not (north) and b.dec1 >= resolve_dec:
                    continue
            # there is some overlap with this brick... read the catalog.
            fn = catsurvey.find_file('tractor', brick=b.brickname)
            if not os.path.exists(fn):
                print('WARNING: catalog', fn, 'does not exist.  Skipping!')
                continue
            print('Reading', fn)
            T = fits_table(fn,
                           columns=[
                               'ra', 'dec', 'brick_primary', 'type', 'release',
                               'brickid', 'brickname', 'objid', 'fracdev',
                               'flux_r', 'shapedev_r', 'shapedev_e1',
                               'shapedev_e2', 'shapeexp_r', 'shapeexp_e1',
                               'shapeexp_e2', 'ref_epoch', 'pmra', 'pmdec',
                               'parallax'
                           ])
            if resolve_dec is not None and b.gal_b > 0:
                if north:
                    T.cut(T.dec >= resolve_dec)
                    print('Cut to', len(T), 'north of the resolve line')
                else:
                    T.cut(T.dec < resolve_dec)
                    print('Cut to', len(T), 'south of the resolve line')
            ok, xx, yy = chipwcs.radec2pixelxy(T.ra, T.dec)
            W, H = chipwcs.get_width(), chipwcs.get_height()
            I, = np.nonzero((xx >= -margin) * (xx <= (W + margin)) *
                            (yy >= -margin) * (yy <= (H + margin)))
            T.cut(I)
            print('Cut to', len(T), 'sources within image + margin')
            T.cut(T.brick_primary)
            print('Cut to', len(T), 'on brick_primary')
            for col in ['out_of_bounds', 'left_blob']:
                if col in T.get_columns():
                    T.cut(T.get(col) == False)
                    print('Cut to', len(T), 'on', col)
            # drop DUP sources
            I, = np.nonzero([t.strip() != 'DUP' for t in T.type])
            T.cut(I)
            print('Cut to', len(T), 'after removing DUP')
            if len(T):
                TT.append(T)
    if len(TT) == 0:
        return None
    T = merge_tables(TT, columns='fillzero')
    T._header = TT[0]._header
    del TT
    print('Total of', len(T), 'catalog sources')

    # Fix up various failure modes:
    # FixedCompositeGalaxy(pos=RaDecPos[240.51147402832561, 10.385488075518923], brightness=NanoMaggies: g=(flux -2.87), r=(flux -5.26), z=(flux -7.65), fracDev=FracDev(0.60177207), shapeExp=re=3.78351e-44, e1=9.30367e-13, e2=1.24392e-16, shapeDev=re=inf, e1=-0, e2=-0)
    # -> convert to EXP
    I, = np.nonzero([
        t == 'COMP' and not np.isfinite(r)
        for t, r in zip(T.type, T.shapedev_r)
    ])
    if len(I):
        print('Converting', len(I), 'bogus COMP galaxies to EXP')
        for i in I:
            T.type[i] = 'EXP'

    # Same thing with the exp component.
    # -> convert to DEV
    I, = np.nonzero([
        t == 'COMP' and not np.isfinite(r)
        for t, r in zip(T.type, T.shapeexp_r)
    ])
    if len(I):
        print('Converting', len(I), 'bogus COMP galaxies to DEV')
        for i in I:
            T.type[i] = 'DEV'
    return T
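
A hedged usage sketch for get_catalog_in_wcs (every name below is an assumption, not something defined in this example): given the WCS of the chip being photometered and LegacySurveyData-style objects for the northern and southern catalog releases, a call might look like:

# chipwcs = tim.subwcs
# cat = get_catalog_in_wcs(chipwcs,
#                          catsurvey_north=survey_north,
#                          catsurvey_south=survey_south,
#                          resolve_dec=32.375)   # assumed north/south resolve declination [deg]
# if cat is None:
#     print('No catalog sources overlap this chip')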
Example #53
0
    # wcs2.ensure_inverse_polynomials()

    print('T1 X,Y ranges', T1.x.min(), T1.x.max(), T1.y.min(), T1.y.max())
    print('T2 X,Y ranges', T2.x.min(), T2.x.max(), T2.y.min(), T2.y.max())

    # ~ 1e-6, 0.0006
    # ok,x,y = wcs2.radec2pixelxy(T2.ra, T2.dec)
    # print('Scatter wcs x vs catalog x:', np.mean(x - T2.x), np.std(x - T2.x))
    # print('Scatter wcs y vs catalog y:', np.mean(y - T2.y), np.std(y - T2.y))
    
    ok,x,y = wcs1.radec2pixelxy(T2.ra, T2.dec)
    print('Converted X,Y ranges:', x.min(), x.max(), y.min(), y.max())
    T2.x = x
    T2.y = y
    
    TT = merge_tables([T1,T2])
    TT.writeto(gstout)
    print('Wrote', gstout)

    hdr = pyfits.open(flt1)[0].header
    hdr['IMAGEW'] = 4096
    hdr['IMAGEH'] = 4096
    pyfits.writeto(fltout, None, header=hdr, clobber=True)
    print('Wrote', fltout)

    #cmd = 'cp "%s" "%s"' % (flt1, fltout)
    #print(cmd)
    #os.system(cmd)
    

    
Example #54
0
    dr3_fns= glob(os.path.join(NERSC_ROOT,'configs/dr3eBOSS/dr3',
                             "survey-ccds-*.fits.gz"))
    assert(len(eboss_fns) > 0)
    assert(len(dr3_fns) > 0)

    dr3 = stack_tables(dr3_fns, textfile=False)
    eboss = stack_tables(eboss_fns, textfile=False)

    dr3.set('pid', add_str_arrays([dr3.expnum.astype(str),
                                   np.char.strip(dr3.image_filename)]))
    eboss.set('pid', add_str_arrays([eboss.expnum.astype(str),
                                     np.char.strip(eboss.image_filename)]))
    dr3.cut(in_eboss(dr3))
    eboss.cut(in_eboss(eboss))

    T = merge_tables([dr3, eboss], columns='fillzero')
    keep = rm_duplicates(T)
    T.cut(keep)
    name = 'survey-ccds-ebossDR3.fits'
    T.writeto(name)
    print('Wrote %s' % name)

    a = set(dr3.pid).union(set(eboss.pid))
    fn = [lin.split("decam/")[1] for lin in a]
    name = 'eboss_image_list.txt'
    with open(name, 'w') as foo:
        for f in fn:
            foo.write('%s\n' % f)
    print('Wrote %s' % name)
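
Example #54 builds its 'pid' identifier by concatenating each exposure number with its image filename through add_str_arrays. A minimal sketch of what that helper might do (an assumption, not necessarily the project's implementation):

import numpy as np

def add_str_arrays(arrs):
    # Element-wise concatenation of a list of string (or string-convertible)
    # arrays, e.g. [expnum.astype(str), filenames] -> 'EXPNUM<filename>'.
    out = np.asarray(arrs[0], dtype=str)
    for a in arrs[1:]:
        out = np.char.add(out, np.asarray(a, dtype=str))
    return out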

 
Example #55
0
    pixscale = 0.262
    apertures = apertures_arcsec / pixscale

    #ps = PlotSequence('uber')
    if False:
        C = fits_table('coadd/000/0001p000/decals-0001p000-ccds.fits')
        for c in C:
            T,hdr = apphot_ps1stars(c, ps, apertures, decals)
            T.writeto('apphot-%08i-%s.fits' % (c.expnum, c.ccdname), header=hdr)
        sys.exit(0)
        
    C = decals.get_ccds_readonly()

    exps = [ 346352, 347460, 347721 ]

    for e in exps:
        print()
        print()
        print('Exposure', e)
        print()
        E = C[C.expnum == e]
        TT = []
        for i,c in enumerate(E):
            print()
            print('Exposure', e, 'chip', i, 'of', len(E))
            print()
            T,hdr = apphot_ps1stars(c, ps, apertures, decals)
            TT.append(T)
        T = merge_tables(TT)
        T.writeto('apphot-%08i.fits' % e, primheader=hdr)
Example #56
0
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--build-sample',
                        action='store_true',
                        help='Build the sample.')
    parser.add_argument('--jpg-cutouts',
                        action='store_true',
                        help='Get jpg cutouts from the viewer.')
    parser.add_argument('--ccd-cutouts',
                        action='store_true',
                        help='Get CCD cutouts of each galaxy.')
    parser.add_argument('--runbrick',
                        action='store_true',
                        help='Run the pipeline.')
    parser.add_argument('--build-webpage',
                        action='store_true',
                        help='(Re)build the web content.')
    args = parser.parse_args()

    # Top-level directory
    key = 'LEGACY_SURVEY_LARGE_GALAXIES'
    if key not in os.environ:
        print('Required ${} environment variable not set'.format(key))
        return 0
    largedir = os.getenv(key)
    samplefile = os.path.join(largedir, 'large-galaxies-sample.fits')

    # --------------------------------------------------
    # Build the sample of large galaxies based on the available imaging.
    if args.build_sample:

        # Read the parent catalog.
        cat = read_rc3()

        # Create a simple WCS object for each object and find all the CCDs
        # touching that WCS footprint.
        survey = LegacySurveyData(version='dr2')  # hack!
        allccds = survey.get_ccds()
        keep = np.concatenate((survey.apply_blacklist(allccds),
                               survey.photometric_ccds(allccds)))
        allccds.cut(keep)

        ccdlist = []
        outcat = []
        for gal in cat:
            galwcs = _simplewcs(gal)

            ccds1 = allccds[ccds_touching_wcs(galwcs, allccds)]
            ccds1 = ccds1[_uniqccds(ccds1)]

            if (len(ccds1) > 0 and 'g' in ccds1.filter and
                    'r' in ccds1.filter and 'z' in ccds1.filter):
                print('Found {} CCDs for {}, D(25)={:.4f}'.format(
                    len(ccds1), gal['GALAXY'], gal['RADIUS']))

                ccdsfile = os.path.join(
                    largedir, 'ccds',
                    '{}-ccds.fits'.format(gal['GALAXY'].strip().lower()))
                print('  Writing {}'.format(ccdsfile))
                if os.path.isfile(ccdsfile):
                    os.remove(ccdsfile)
                ccds1.writeto(ccdsfile)

                ccdlist.append(ccds1)
                if len(outcat) == 0:
                    outcat = gal
                else:
                    outcat = vstack((outcat, gal))
                #if gal['GALAXY'] == 'MCG5-19-36':
                #    pdb.set_trace()

        # Write out the final catalog.
        samplefile = os.path.join(largedir, 'large-galaxies-sample.fits')
        if os.path.isfile(samplefile):
            os.remove(samplefile)
        print('Writing {}'.format(samplefile))
        outcat.write(samplefile)
        print(outcat)

        # Do we need to transfer any of the data to nyx?
        _getfiles(merge_tables(ccdlist))

    # --------------------------------------------------
    # Get data, model, and residual cutouts from the legacysurvey viewer.  Also
    # get thumbnails that are lower resolution.
    if args.jpg_cutouts:
        thumbsize = 100
        sample = fits.getdata(samplefile, 1)
        for gal in sample:
            size = np.ceil(10 * gal['RADIUS'] / PIXSCALE)
            thumbpixscale = PIXSCALE * size / thumbsize

            #imageurl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}'.format(gal['RA'], gal['DEC'])+\
            #  '&pixscale={:.3f}&size={:g}'.format(PIXSCALE, size)
            #imagejpg = os.path.join(largedir, 'cutouts', gal['GALAXY'].strip().lower()+'-image.jpg')
            #if os.path.isfile(imagejpg):
            #    os.remove(imagejpg)
            #os.system('wget --continue -O {:s} "{:s}"' .format(imagejpg, imageurl))

            thumburl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}'.format(gal['RA'], gal['DEC'])+\
              '&pixscale={:.3f}&size={:g}'.format(thumbpixscale, thumbsize)
            thumbjpg = os.path.join(
                largedir, 'cutouts',
                gal['GALAXY'].strip().lower() + '-image-thumb.jpg')
            if os.path.isfile(thumbjpg):
                os.remove(thumbjpg)
            os.system('wget --continue -O {:s} "{:s}"'.format(
                thumbjpg, thumburl))

    # --------------------------------------------------
    # (Re)build the webpage.
    if args.build_webpage:

        # index.html
        html = open(os.path.join(largedir, 'index.html'), 'w')
        html.write('<html><body>\n')
        html.write('<h1>Sample of Large Galaxies</h1>\n')
        html.write('<table border="2" width="30%">\n')
        html.write('<tbody>\n')
        sample = fits.getdata(samplefile, 1)
        for gal in sample:
            # Add coordinates and sizes here.
            galaxy = gal['GALAXY'].strip().lower()
            html.write('<tr>\n')
            html.write('<td><a href="html/{}.html">{}</a></td>\n'.format(
                galaxy, galaxy.upper()))
            html.write(
                '<td><a href="http://legacysurvey.org/viewer/?ra={:.6f}&dec={:.6f}" target="_blank"><img src=cutouts/{}-image-thumb.jpg alt={} /></a></td>\n'
                .format(gal['RA'], gal['DEC'], galaxy, galaxy.upper()))
            #           html.write('<td><a href="html/{}.html"><img src=cutouts/{}-image-thumb.jpg alt={} /></a></td>\n'.format(galaxy, galaxy, galaxy.upper()))
            html.write('</tr>\n')
        html.write('</tbody>\n')
        html.write('</table>\n')
        html.write('</body></html>\n')
        html.close()

        sys.exit(1)  # NB: exits here, so the per-galaxy pages below are never built

        # individual galaxy pages
        for gal in sample[:3]:
            galaxy = gal['GALAXY'].strip().lower()
            html = open(os.path.join(largedir, 'html/{}.html'.format(galaxy)),
                        'w')
            html.write('<html><body>\n')
            html.write(
                '<a href=../cutouts/{}.jpg><img src=../cutouts/{}-image.jpg alt={} /></a>\n'
                .format(galaxy, galaxy, galaxy.upper()))
            html.write('</body></html>\n')
            html.close()

    # --------------------------------------------------
    # Get cutouts of all the CCDs for each galaxy.
    if args.ccd_cutouts:
        sample = fits.getdata(samplefile, 1)

        for gal in sample[1:2]:
            galaxy = gal['GALAXY'].strip().lower()
            ccdsfile = os.path.join(largedir, 'ccds',
                                    '{}-ccds.fits'.format(galaxy))
            ccds = fits.getdata(ccdsfile)

            pdb.set_trace()

    # --------------------------------------------------
    # Run the pipeline.
    if args.runbrick:
        sample = fits.getdata(samplefile, 1)

        for gal in sample[1:2]:
            galaxy = gal['GALAXY'].strip().lower()
            diam = 10 * np.ceil(gal['RADIUS'] / PIXSCALE).astype(
                'int16')  # [pixels]

            # Note: zoom is relative to the center of an imaginary brick with
            # dimensions (0, 3600, 0, 3600).
            survey = LegacySurveyData(version='dr2', output_dir=largedir)
            run_brick(None,
                      survey,
                      radec=(gal['RA'], gal['DEC']),
                      blobxy=zip([diam / 2], [diam / 2]),
                      threads=1,
                      zoom=(1800 - diam / 2, 1800 + diam / 2, 1800 - diam / 2,
                            1800 + diam / 2),
                      wise=False,
                      forceAll=True,
                      writePickles=False,
                      do_calibs=False,
                      write_metrics=False,
                      pixPsf=True,
                      splinesky=True,
                      early_coadds=True,
                      stages=['writecat'],
                      ceres=False)

            pdb.set_trace()
Example #57
0
    
if __name__ == '__main__':
    ps = PlotSequence('morph')

    from glob import glob
    from astrometry.util.fits import merge_tables, fits_table

    # glob doesn't understand {}
    #fns = glob('dr3/tractor/011/tractor-011?p0{02,05,07,10}.fits')
    fns = glob('dr3/tractor/011/tractor-011?p0*.fits')
    #fns = glob('dr3/metrics/011/all-models-011?p0*.fits')
    fns = [fn for fn in fns if fn[-7:-5] in ['02','05','07','10']]
    assert(len(fns) == 16)

    T = merge_tables([fits_table(fn) for fn in fns])
    print(len(T), 'sources')

    dchipsf  = T.dchisq[:,0]
    dchisimp = T.dchisq[:,1]
    dchidev  = T.dchisq[:,2]
    dchiexp  = T.dchisq[:,3]

    model = np.array(['P' if p > s else 'S' for p,s in zip(dchipsf, dchisimp)])
    dchi = dchipsf * (model == 'P') + dchisimp * (model == 'S')

    # which sources have galaxy models computed
    I = np.flatnonzero(dchiexp)
    print(len(I), 'have EXP,DEV models')
    
    galaxy_margin = 12.
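
A small, hedged continuation one might append to this excerpt to summarize the PSF-vs-SIMP split computed above (it uses only names already defined in the excerpt, plus collections.Counter):

    from collections import Counter
    import numpy as np
    print('Preferred point-source model:', Counter(model))  # 'P' = PSF wins, 'S' = SIMP wins
    print('Median dchisq of the preferred model:', np.median(dchi))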
Example #58
0
                continue
            print('Reading', fn)
            T = fits_table(fn)
            ok,xx,yy = chipwcs.radec2pixelxy(T.ra, T.dec)
            W,H = chipwcs.get_width(), chipwcs.get_height()
            I = np.flatnonzero((xx >= -margin) * (xx <= (W+margin)) *
                               (yy >= -margin) * (yy <= (H+margin)))
            T.cut(I)
            print('Cut to', len(T), 'sources within image + margin')
            #print('Brick_primary:', np.unique(T.brick_primary))
            T.cut(T.brick_primary)
            print('Cut to', len(T), 'on brick_primary')
            T.cut((T.out_of_bounds == False) * (T.left_blob == False))
            print('Cut to', len(T), 'on out_of_bounds and left_blob')
            TT.append(T)
        T = merge_tables(TT)
        T._header = TT[0]._header
        del TT
        #T.writeto('cat.fits')

        # Fix up various failure modes:
        # FixedCompositeGalaxy(pos=RaDecPos[240.51147402832561, 10.385488075518923], brightness=NanoMaggies: g=(flux -2.87), r=(flux -5.26), z=(flux -7.65), fracDev=FracDev(0.60177207), shapeExp=re=3.78351e-44, e1=9.30367e-13, e2=1.24392e-16, shapeDev=re=inf, e1=-0, e2=-0)
        # -> convert to EXP
        I = np.flatnonzero(np.array([((t.type == 'COMP') and 
                                      (not np.isfinite(t.shapedev_r)))
                                     for t in T]))
        if len(I):
            print('Converting', len(I), 'bogus COMP galaxies to EXP')
            for i in I:
                T.type[i] = 'EXP'
def main(decals=None, opt=None):
    '''Driver function for forced photometry of individual DECam images.
    '''
    if opt is None:
        parser = get_parser()
        opt = parser.parse_args()

    Time.add_measurement(MemMeas)
    t0 = Time()

    if os.path.exists(opt.outfn):
        print('Output file exists:', opt.outfn)
        sys.exit(0)

    if not opt.forced:
        opt.apphot = True

    zoomslice = None
    if opt.zoom is not None:
        (x0,x1,y0,y1) = opt.zoom
        zoomslice = (slice(y0,y1), slice(x0,x1))

    ps = None
    if opt.plots is not None:
        from astrometry.util.plotutils import PlotSequence
        ps = PlotSequence(opt.plots)

    # Try parsing filename as exposure number.
    try:
        expnum = int(opt.filename)
        opt.filename = None
    except:
        # make this 'None' for decals.find_ccds()
        expnum = None

    # Try parsing HDU number
    try:
        opt.hdu = int(opt.hdu)
        ccdname = None
    except:
        ccdname = opt.hdu
        opt.hdu = -1

    if decals is None:
        decals = Decals()

    if opt.filename is not None and opt.hdu >= 0:
        # Read metadata from file
        T = exposure_metadata([opt.filename], hdus=[opt.hdu])
        print('Metadata:')
        T.about()
    else:
        # Read metadata from decals-ccds.fits table
        T = decals.find_ccds(expnum=expnum, ccdname=ccdname)
        print(len(T), 'with expnum', expnum, 'and CCDname', ccdname)
        if opt.hdu >= 0:
            T.cut(T.image_hdu == opt.hdu)
            print(len(T), 'with HDU', opt.hdu)
        if opt.filename is not None:
            T.cut(np.array([f.strip() == opt.filename for f in T.image_filename]))
            print(len(T), 'with filename', opt.filename)
        assert(len(T) == 1)

    im = decals.get_image_object(T[0])
    tim = im.get_tractor_image(slc=zoomslice, pixPsf=True, splinesky=True)
    print('Got tim:', tim)

    if opt.catfn in ['DR1', 'DR2']:
        if opt.catalog_path is None:
            opt.catalog_path = opt.catfn.lower()

        margin = 20
        TT = []
        chipwcs = tim.subwcs
        bricks = bricks_touching_wcs(chipwcs, decals=decals)
        for b in bricks:
            # there is some overlap with this brick... read the catalog.
            fn = os.path.join(opt.catalog_path, 'tractor', b.brickname[:3],
                              'tractor-%s.fits' % b.brickname)
            if not os.path.exists(fn):
                print('WARNING: catalog', fn, 'does not exist.  Skipping!')
                continue
            print('Reading', fn)
            T = fits_table(fn)
            ok,xx,yy = chipwcs.radec2pixelxy(T.ra, T.dec)
            W,H = chipwcs.get_width(), chipwcs.get_height()
            I = np.flatnonzero((xx >= -margin) * (xx <= (W+margin)) *
                               (yy >= -margin) * (yy <= (H+margin)))
            T.cut(I)
            print('Cut to', len(T), 'sources within image + margin')
            # print('Brick_primary:', np.unique(T.brick_primary))
            T.cut(T.brick_primary)
            print('Cut to', len(T), 'on brick_primary')
            T.cut((T.out_of_bounds == False) * (T.left_blob == False))
            print('Cut to', len(T), 'on out_of_bounds and left_blob')
            TT.append(T)
        T = merge_tables(TT)
        T._header = TT[0]._header
        del TT

        # Fix up various failure modes:
        # FixedCompositeGalaxy(pos=RaDecPos[240.51147402832561, 10.385488075518923], brightness=NanoMaggies: g=(flux -2.87), r=(flux -5.26), z=(flux -7.65), fracDev=FracDev(0.60177207), shapeExp=re=3.78351e-44, e1=9.30367e-13, e2=1.24392e-16, shapeDev=re=inf, e1=-0, e2=-0)
        # -> convert to EXP
        I = np.flatnonzero(np.array([((t.type == 'COMP') and
                                      (not np.isfinite(t.shapedev_r)))
                                     for t in T]))
        if len(I):
            print('Converting', len(I), 'bogus COMP galaxies to EXP')
            for i in I:
                T.type[i] = 'EXP'

        # Same thing with the exp component.
        # -> convert to DEV
        I = np.flatnonzero(np.array([((t.type == 'COMP') and
                                      (not np.isfinite(t.shapeexp_r)))
                                     for t in T]))
        if len(I):
            print('Converting', len(I), 'bogus COMP galaxies to DEV')
            for i in I:
                T.type[i] = 'DEV'

        if opt.write_cat:
            T.writeto(opt.write_cat)
            print('Wrote catalog to', opt.write_cat)

    else:
        T = fits_table(opt.catfn)

    T.shapeexp = np.vstack((T.shapeexp_r, T.shapeexp_e1, T.shapeexp_e2)).T
    T.shapedev = np.vstack((T.shapedev_r, T.shapedev_e1, T.shapedev_e2)).T

    cat = read_fits_catalog(T, ellipseClass=tractor.ellipses.EllipseE)
    # print('Got cat:', cat)

    print('Forced photom...')
    opti = None
    if opt.ceres:
        from tractor.ceres_optimizer import CeresOptimizer
        B = 8
        opti = CeresOptimizer(BW=B, BH=B)

    tr = Tractor([tim], cat, optimizer=opti)
    tr.freezeParam('images')
    for src in cat:
        src.freezeAllBut('brightness')
        src.getBrightness().freezeAllBut(tim.band)

    F = fits_table()
    F.brickid   = T.brickid
    F.brickname = T.brickname
    F.objid     = T.objid

    F.filter  = np.array([tim.band]               * len(T))
    F.mjd     = np.array([tim.primhdr['MJD-OBS']] * len(T))
    F.exptime = np.array([tim.primhdr['EXPTIME']] * len(T))

    ok,x,y = tim.sip_wcs.radec2pixelxy(T.ra, T.dec)
    F.x = (x-1).astype(np.float32)
    F.y = (y-1).astype(np.float32)

    if opt.apphot:
        import photutils

        img = tim.getImage()
        ie = tim.getInvError()
        with np.errstate(divide='ignore'):
            imsigma = 1. / ie
        imsigma[ie == 0] = 0.

        apimg = []
        apimgerr = []

        # Aperture photometry locations
        xxyy = np.vstack([tim.wcs.positionToPixel(src.getPosition()) for src in cat]).T
        apxy = xxyy - 1.

        apertures = apertures_arcsec / tim.wcs.pixel_scale()
        print('Apertures:', apertures, 'pixels')

        for rad in apertures:
            aper = photutils.CircularAperture(apxy, rad)
            p = photutils.aperture_photometry(img, aper, error=imsigma)
            apimg.append(p.field('aperture_sum'))
            apimgerr.append(p.field('aperture_sum_err'))
        ap = np.vstack(apimg).T
        ap[np.logical_not(np.isfinite(ap))] = 0.
        F.apflux = ap
        ap = 1./(np.vstack(apimgerr).T)**2
        ap[np.logical_not(np.isfinite(ap))] = 0.
        F.apflux_ivar = ap
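        # apflux_ivar is the inverse variance (1 / sigma^2) of each aperture sum;
        # non-finite entries (e.g. apertures falling entirely on masked pixels,
        # where the error is zero) have been zeroed out above.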

    if opt.forced:
        kwa = {}
        if opt.plots is None:
            kwa.update(wantims=False)

        R = tr.optimize_forced_photometry(variance=True, fitstats=True,
                                          shared_params=False, **kwa)

        if opt.plots:
            (data,mod,ie,chi,roi) = R.ims1[0]

            ima = tim.ima
            imchi = dict(interpolation='nearest', origin='lower', vmin=-5, vmax=5)
            plt.clf()
            plt.imshow(data, **ima)
            plt.title('Data: %s' % tim.name)
            ps.savefig()

            plt.clf()
            plt.imshow(mod, **ima)
            plt.title('Model: %s' % tim.name)
            ps.savefig()

            plt.clf()
            plt.imshow(chi, **imchi)
            plt.title('Chi: %s' % tim.name)
            ps.savefig()

        F.flux = np.array([src.getBrightness().getFlux(tim.band)
                           for src in cat]).astype(np.float32)
        F.flux_ivar = R.IV.astype(np.float32)

        F.fracflux = R.fitstats.profracflux.astype(np.float32)
        F.rchi2    = R.fitstats.prochi2    .astype(np.float32)

    program_name = sys.argv[0]
    version_hdr = get_version_header(program_name, decals.decals_dir)
    # HACK -- print only two directory names + filename of CPFILE.
    fname = os.path.basename(im.imgfn)
    d = os.path.dirname(im.imgfn)
    d1 = os.path.basename(d)
    d = os.path.dirname(d)
    d2 = os.path.basename(d)
    fname = os.path.join(d2, d1, fname)
    print('Trimmed filename to', fname)
    #version_hdr.add_record(dict(name='CPFILE', value=im.imgfn, comment='DECam comm.pipeline file'))
    version_hdr.add_record(dict(name='CPFILE', value=fname, comment='DECam comm.pipeline file'))
    version_hdr.add_record(dict(name='CPHDU', value=im.hdu, comment='DECam comm.pipeline ext'))
    version_hdr.add_record(dict(name='CAMERA', value='DECam', comment='Dark Energy Camera'))
    version_hdr.add_record(dict(name='EXPNUM', value=im.expnum, comment='DECam exposure num'))
    version_hdr.add_record(dict(name='CCDNAME', value=im.ccdname, comment='DECam CCD name'))
    version_hdr.add_record(dict(name='FILTER', value=tim.band, comment='Bandpass of this image'))
    version_hdr.add_record(dict(name='EXPOSURE', value='decam-%s-%s' % (im.expnum, im.ccdname), comment='Name of this image'))

    keys = ['TELESCOP','OBSERVAT','OBS-LAT','OBS-LONG','OBS-ELEV',
            'INSTRUME']
    for key in keys:
        if key in tim.primhdr:
            version_hdr.add_record(dict(name=key, value=tim.primhdr[key]))

    hdr = fitsio.FITSHDR()

    units = {'mjd':'sec', 'exptime':'sec', 'flux':'nanomaggy',
             'flux_ivar':'1/nanomaggy^2'}
    columns = F.get_columns()
    for i,col in enumerate(columns):
        if col in units:
            hdr.add_record(dict(name='TUNIT%i' % (i+1), value=units[col]))

    outdir = os.path.dirname(opt.outfn)
    if len(outdir):
        trymakedirs(outdir)
    fitsio.write(opt.outfn, None, header=version_hdr, clobber=True)
    F.writeto(opt.outfn, header=hdr, append=True)
    print('Wrote', opt.outfn)

    print('Finished forced phot:', Time()-t0)
    return 0