def redo_query(root='j023507-040202',
               instruments=['WFC3/IR', 'WFC3/UVIS', 'ACS/WFC'],
               filters=[],
               proposal_id=[]):
    """
    Redo the MAST query based on the parent table to look for new data
    """
    from mastquery import overlaps
    from grizli import utils

    # Read the existing catalog
    parent = utils.read_catalog('{0}_footprint.fits'.format(root))

    # Overlap keywords, if available
    kwargs = {}
    if 'BUFFER' in parent.meta:
        kwargs['buffer_arcmin'] = parent.meta['BUFFER']

    if 'FOLAP' in parent.meta:
        kwargs['fractional_overlap'] = parent.meta['FOLAP']

    if 'MIN_AREA' in parent.meta:
        kwargs['min_area'] = parent.meta['MIN_AREA']

    # Rerun overlap query to get new data
    tabs = overlaps.find_overlaps(parent,
                                  filters=filters,
                                  proposal_id=proposal_id,
                                  instruments=instruments,
                                  close=True,
                                  keep_single_name=True,
                                  **kwargs)

    return tabs
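A minimal usage sketch, assuming the 'j023507-040202_footprint.fits' table from an earlier mastquery run is in the working directory:

# Usage sketch: look for new exposures over the parent footprint
new_tabs = redo_query(root='j023507-040202',
                      instruments=['WFC3/IR', 'WFC3/UVIS', 'ACS/WFC'])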
Example #2
def group_by_filter():
    """
    aws s3 sync --exclude "*" --include "cosmos_visits*" s3://grizli-preprocess/CosmosMosaic/ ./

    """
    from grizli import prep, utils
    import numpy as np

    #master = 'cosmos'
    #master = 'grizli-cosmos-v2'
    #master = 'grizli-jan2019'
    #master = 'grizli-v1-19.12.04'
    master = 'grizli-v1-19.12.05'

    tab = utils.read_catalog('{0}_visits.fits'.format(master))
    all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]

    # By filter

    # Exclude DASH
    dash = utils.column_string_operation(tab['product'], 'icxe', 'startswith')
    dash |= utils.column_string_operation(tab['product'], '_icxe',
                                         'count', 'or')

    # Don't exclude DASH
    dash = utils.column_string_operation(tab['product'], 'xxxx', 'startswith')

    groups = {}
    fpstr = {}

    for filt in np.unique(tab['filter']):
        mat = (tab['filter'] == filt) & (~dash)
        groups[filt] = {'filter': filt, 'files': [], 'awspath': [], 'footprints': []}
        fpstr[filt] = 'fk5\n'

        for ix in np.where(mat)[0]:
            fp = all_visits[ix]['footprint']
            if hasattr(fp, '__len__'):
                fps = fp
            else:
                fps = [fp]
            for fp in fps:
                xy = fp.boundary.xy
                poly = np.array([xy[0].tolist(), xy[1].tolist()]).T.flatten()
                pstr = 'polygon(' + ','.join(['{0:.6f}'.format(c)
                                              for c in poly])
                pstr += ') # text={{{0}}}\n'.format(all_visits[ix]['product'])

                fpstr[filt] += pstr

            for k in ['files', 'awspath', 'footprints']:
                groups[filt][k].extend(all_visits[ix][k])

        fp = open('{0}-pointings-{1}.reg'.format(master, filt), 'w')
        fp.write(fpstr[filt])
        fp.close()

        print('{0:6} {1:>3d} {2:>4d} ({3:>4d})'.format(filt, mat.sum(), len(groups[filt]['files']), len(np.unique(groups[filt]['files']))))

    np.save('{0}_filter_groups.npy'.format(master), [groups])
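The saved groups file can be read back with np.load; a short sketch, with the file name following from the final `master` value above:

# Read back the filter groups written above
import numpy as np
groups = np.load('grizli-v1-19.12.05_filter_groups.npy',
                 allow_pickle=True)[0]
for filt in groups:
    print(filt, len(groups[filt]['files']))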
Example #3
def make_masks(files=None, inspect='grizli_inspect.fits', ext=1):
    """
    Make satellite trail masks
    """
    import os
    import numpy as np
    import astropy.io.fits as pyfits

    try:
        from .. import utils
        from ..ds9 import DS9
    except:
        from grizli import utils
        from grizli.ds9 import DS9

    if files is None:
        insp = utils.read_catalog(inspect)
        flag = (insp['satellite'] > 0) | (insp['earth'] > 0)
        files = [
            f.replace('_ramp.png', '_flt.fits') for f in insp['images'][flag]
        ]

    ds9 = DS9()
    for file in files:
        im = pyfits.open(file)
        med = np.median(im['SCI', ext].data)
        ds9.view(im['SCI', ext].data - med)
        reg_file = file.replace('_flt.fits', '.*.mask.reg')
        reg_file = reg_file.replace('_flc.fits', '.*.mask.reg')
        reg_file = reg_file.replace('_c0m.fits', '.*.mask.reg')
        reg_file = reg_file.replace('*', '{0:02d}'.format(ext))
        if os.path.exists(reg_file):
            ds9.set('regions file ' + reg_file)

        x = input(file + ': draw region (x to skip, q to abort): ')
        if x in ['q']:
            print('Abort.')
            #continue
            return False
        elif x in ['x']:
            print('Skip {0}.'.format(file))
            continue

        ds9.set('regions save ' + reg_file)
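A usage sketch; it assumes a DS9 window is already running for grizli.ds9.DS9 to attach to, and the function returns False if the user aborts:

# Usage sketch: interactively mask exposures flagged in the inspection table
status = make_masks(inspect='grizli_inspect.fits', ext=1)
if status is False:
    print('Masking aborted')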
Example #4
def eazy_photoz(root,
                force=False,
                object_only=True,
                apply_background=True,
                aper_ix=1,
                apply_prior=True,
                beta_prior=True,
                get_external_photometry=True,
                external_limits=3,
                external_sys_err=0.3,
                external_timeout=300,
                sys_err=0.05,
                z_step=0.01,
                z_min=0.01,
                z_max=12,
                total_flux='flux'):

    import os
    import eazy
    import numpy as np

    from grizli import utils
    import mastquery.utils

    if (os.path.exists('{0}.eazypy.self.npy'.format(root))) & (not force):
        self = np.load('{0}.eazypy.self.npy'.format(root),
                       allow_pickle=True)[0]
        zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))
        cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
        return self, cat, zout

    trans = {
        'f098m': 201,
        'f105w': 202,
        'f110w': 241,
        'f125w': 203,
        'f140w': 204,
        'f160w': 205,
        'f435w': 233,
        'f438w': 211,
        'f606w': 236,
        'f625w': 237,
        'f814w': 239,
        'f702w': 15,
        'f555w': 235,
        'f350lp': 339
    }

    cat = utils.read_catalog('{0}_phot.fits'.format(root))
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())

    if get_external_photometry:
        print('Get external photometry from Vizier')
        try:
            ext = get_external_catalog(cat,
                                       external_limits=external_limits,
                                       timeout=external_timeout,
                                       sys_err=external_sys_err)
            for c in ext.colnames:
                if c not in cat.colnames:
                    cat[c] = ext[c]

            for k in ext.meta:
                cat.meta[k] = ext.meta[k]
        except:
            print(' - External catalog FAILED')
            pass

    # Total flux
    cat.meta['TOTALCOL'] = total_flux, 'Column for total flux'

    apcorr = {}
    for i in range(5):
        if 'flux_aper_{0}'.format(i) in cat.colnames:
            cat['apcorr_{0}'.format(
                i)] = cat[total_flux] / cat['flux_aper_{0}'.format(i)]
            for f in filters:
                bkgc = '{0}_bkg_aper_{1}'.format(f, i)
                if (bkgc in cat.colnames) & apply_background:
                    bkg = cat[bkgc]
                else:
                    bkg = 0.

                cat['{0}_corr_{1}'.format(
                    f, i)] = (cat['{0}_flux_aper_{1}'.format(f, i)] -
                              bkg) * cat['apcorr_{0}'.format(i)]
                cat['{0}_ecorr_{1}'.format(
                    f, i)] = cat['{0}_fluxerr_aper_{1}'.format(
                        f, i)] * cat['apcorr_{0}'.format(i)]

                # mask_thresh = np.percentile(cat['{0}_mask_aper_{1}'.format(f, i)], 95)
                aper_area = np.pi * (cat.meta['APER_{0}'.format(i)] / 2)**2
                mask_thresh = aper_area

                bad = cat['{0}_mask_aper_{1}'.format(f, i)] > 0.2 * mask_thresh
                cat['{0}_corr_{1}'.format(f, i)][bad] = -99
                cat['{0}_ecorr_{1}'.format(f, i)][bad] = -99

    cat.rename_column('number', 'id')
    cat['z_spec'] = cat['id'] * 0. - 1

    # Spurious sources, sklearn SVM model trained for a single field
    morph_model = os.path.join(os.path.dirname(utils.__file__),
                               'data/sep_catalog_junk.pkl')

    if os.path.exists(morph_model):
        from sklearn.externals import joblib
        clf = joblib.load(morph_model)
        X = np.hstack([[cat['peak'] / cat['flux'],
                        cat['cpeak'] / cat['peak']]]).T

        # Predict labels, which were generated for
        #    bad_bright, bad_faint, stars, big_galaxies, small_galaxies
        pred = clf.predict_proba(X)

        # Should be >~ 0.9 for valid sources, stars & galaxies in "ir" image
        cat['class_valid'] = pred[:, -3:].sum(axis=1)
        cat['class_valid'].format = '.2f'

    cat.write('{0}_phot_apcorr.fits'.format(root), overwrite=True)

    # Translate
    fp = open('zphot.translate', 'w')
    for f in filters:
        if f in trans:
            fp.write('{0}_corr_{1} F{2}\n'.format(f, aper_ix, trans[f]))
            fp.write('{0}_ecorr_{1} E{2}\n'.format(f, aper_ix, trans[f]))

    fp.close()
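
    # The translate file written above maps catalog columns to eazy filter
    # numbers, one flux/error pair per filter; e.g. for f160w with aper_ix=1
    # it contains:
    #     f160w_corr_1 F205
    #     f160w_ecorr_1 E205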

    params = {}
    params['CATALOG_FILE'] = '{0}_phot_apcorr.fits'.format(root)
    params['Z_STEP'] = z_step
    params['MAIN_OUTPUT_FILE'] = '{0}.eazypy'.format(root)

    params['Z_MAX'] = z_max
    params['MW_EBV'] = mastquery.utils.get_irsa_dust(cat['ra'].mean(),
                                                     cat['dec'].mean())
    params['PRIOR_ABZP'] = 23.9

    params['SYS_ERR'] = sys_err
    params['CAT_HAS_EXTCORR'] = False

    # Pick prior filter, starting from reddest
    for f in [
            'f435w', 'f606w', 'f814w', 'f105w', 'f110w', 'f125w', 'f140w',
            'f160w'
    ][::-1]:
        if f in filters:
            params['PRIOR_FILTER'] = trans[f]
            mag = 23.9 - 2.5 * np.log10(cat['{0}_corr_{1}'.format(f, aper_ix)])
            break
    #
    params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'

    zpfile = None
    load_products = False

    # NB: this path is specific to the original environment; point it to a
    # local clone of the `eazy-photoz` repository
    eazy.symlink_eazy_inputs(
        path='/usr/local/share/python/eazy-py/eazy-photoz', path_is_env=False)

    self = eazy.photoz.PhotoZ(param_file=None,
                              translate_file='zphot.translate',
                              zeropoint_file=zpfile,
                              params=params,
                              load_prior=True,
                              load_products=load_products)

    if object_only:
        return self

    idx = np.arange(self.NOBJ)

    #sample = (mag < 27) #& (self.cat['star_flag'] != 1)
    #sample |= (self.cat['z_spec'] > 0)
    sample = np.isfinite(mag)

    for iter in range(2):
        self.fit_parallel(idx[sample], n_proc=10)
        self.error_residuals()

    self.standard_output(prior=apply_prior, beta_prior=beta_prior)

    zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))

    np.save('{0}.eazypy.self.npy'.format(root), [self])

    return self, cat, zout
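A usage sketch (the root name is illustrative); with the default object_only=True only the initialized PhotoZ object is returned:

# Usage sketch
self = eazy_photoz('j023507-040202')
# Full run: fit, write outputs, and return the object, catalog, and redshifts
self, cat, zout = eazy_photoz('j023507-040202', object_only=False,
                              force=True)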
Example #5
def drizzle_images(label='macs0647-jd1', ra=101.9822125, dec=70.24326667,
                   pixscale=0.1, size=10, wcs=None, pixfrac=0.33,
                   kernel='square', theta=0, half_optical_pixscale=True,
                   filters=['f160w', 'f140w', 'f125w', 'f105w', 'f110w',
                            'f098m', 'f850lp', 'f814w', 'f775w', 'f606w',
                            'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
                   skip=None, remove=True, rgb_params=RGB_PARAMS,
                   master='grizli-jan2019',
                   aws_bucket='s3://grizli/CutoutProducts/',
                   scale_ab=21, thumb_height=2.0, sync_fits=True,
                   subtract_median=True, include_saturated=True,
                   include_ir_psf=False,
                   show_filters=['visb', 'visr', 'y', 'j', 'h'],
                   combine_similar_filters=True, single_output=True,
                   aws_prep_dir=None, make_segmentation_figure=False,
                   get_dict=False, dryrun=False, **kwargs):
    """
    label='cp561356'; ra=150.208875; dec=1.850241667; size=40; filters=['f160w','f814w', 'f140w','f125w','f105w','f606w','f475w']

    master: selects one of the precomputed lists of available exposures:

        'cosmos': deprecated
        'grizli-cosmos-v2': All imaging covering the COSMOS field
        'candels-july2019': CANDELS fields other than COSMOS
        'grizli-v1': First processing of the Grizli CHArGE dataset
        'grizli-v1-19.12.04': Updated CHArGE fields

    """
    import glob
    import copy
    import inspect
    import os

    import numpy as np

    import astropy.io.fits as pyfits
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    from drizzlepac.adrizzle import do_driz

    import boto3

    from grizli import prep, utils
    from grizli.pipeline import auto_script

    # Function arguments
    if get_dict:
        frame = inspect.currentframe()
        args = inspect.getargvalues(frame).locals

        pop_args = ['get_dict', 'frame', 'kwargs']
        pop_classes = (np.__class__, do_driz.__class__, SkyCoord.__class__)

        for k in kwargs:
            args[k] = kwargs[k]

        for k in args:
            if isinstance(args[k], pop_classes):
                pop_args.append(k)

        for k in pop_args:
            if k in args:
                args.pop(k)

        return args

    # Boto objects
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')

    if isinstance(ra, str):
        coo = SkyCoord('{0} {1}'.format(ra, dec), unit=(u.hour, u.deg))
        ra, dec = coo.ra.value, coo.dec.value

    if label is None:
        try:
            import mastquery.utils
            label = mastquery.utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(1/15, 1), targstr='j{rah}{ram}{ras}{sign}{ded}{dem}{des}')
        except:
            label = 'grizli-cutout'

    #master = 'cosmos'
    #master = 'grizli-jan2019'

    if master == 'grizli-jan2019':
        parent = 's3://grizli/MosaicTools/'
        bkt = s3.Bucket('grizli')
    elif master == 'cosmos':
        parent = 's3://grizli-preprocess/CosmosMosaic/'
        bkt = s3.Bucket('grizli-preprocess')
    elif master == 'grizli-cosmos-v2':
        parent = 's3://grizli-cosmos-v2/Mosaics/'
        bkt = s3.Bucket('grizli-cosmos-v2')
    elif master == 'candels-july2019':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    elif master == 'grizli-v1-19.12.04':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    elif master == 'grizli-v1-19.12.05':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    else:
        # Run on local files, e.g., "Prep" directory
        parent = None
        bkt = None
        #remove = False

    # Download summary files from S3
    for ext in ['_visits.fits', '_visits.npy', '_filter_groups.npy'][-1:]:
        newfile = '{0}{1}'.format(master, ext)
        if (not os.path.exists(newfile)) & (parent is not None):

            s3_path = parent.split('/')[-2]
            s3_file = '{0}{1}'.format(master, ext)
            print('{0}{1}'.format(parent, s3_file))
            bkt.download_file(s3_path+'/'+s3_file, s3_file,
                              ExtraArgs={"RequestPayer": "requester"})

            #os.system('aws s3 cp {0}{1}{2} ./'.format(parent, master, ext))

    #tab = utils.read_catalog('{0}_visits.fits'.format(master))
    #all_visits = np.load('{0}_visits.npy'.format(master))[0]
    if parent is not None:
        groups = np.load('{0}_filter_groups.npy'.format(master), allow_pickle=True)[0]
    else:

        if aws_prep_dir is not None:
            spl = aws_prep_dir.replace('s3://', '').split('/')
            prep_bucket = spl[0]
            prep_root = spl[2]

            prep_bkt = s3.Bucket(prep_bucket)

            s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
            s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
            s3_file = '{0}_visits.npy'.format(prep_root)

            # Make output path Prep/../Thumbnails/
            if aws_bucket is not None:
                aws_bucket = ('s3://' +
                              s3_full_path.replace('/Prep/', '/Thumbnails/'))

            print('{0}{1}'.format(s3_prep_path, s3_file))
            if not os.path.exists(s3_file):
                prep_bkt.download_file(os.path.join(s3_prep_path, s3_file),
                            s3_file, ExtraArgs={"RequestPayer": "requester"})

            groups_files = glob.glob('{0}_filter_groups.npy'.format(prep_root))
            visit_query = prep_root+'_'
        else:
            groups_files = glob.glob('*filter_groups.npy')
            visit_query = '*'

        # Reformat local visits.npy into a groups file
        if (len(groups_files) == 0):

            visit_file = glob.glob(visit_query+'visits.npy')[0]

            visits, groups, info = np.load(visit_file, allow_pickle=True)
            visit_root = visit_file.split('_visits')[0]

            visit_filters = np.array([v['product'].split('-')[-1] for v in visits])
            groups = {}
            for filt in np.unique(visit_filters):
                groups[filt] = {}
                groups[filt]['filter'] = filt
                groups[filt]['files'] = []
                groups[filt]['footprints'] = []
                groups[filt]['awspath'] = []

                ix = np.where(visit_filters == filt)[0]
                for i in ix:
                    groups[filt]['files'].extend(visits[i]['files'])
                    groups[filt]['footprints'].extend(visits[i]['footprints'])

                Nf = len(groups[filt]['files'])
                print('{0:>6}: {1:>3} exposures'.format(filt, Nf))

                if aws_prep_dir is not None:
                    groups[filt]['awspath'] = [s3_full_path
                                               for file in range(Nf)]

            np.save('{0}_filter_groups.npy'.format(visit_root), [groups])

        else:
            print('Use groups file: {0}'.format(groups_files[0]))

            groups = np.load(groups_files[0], allow_pickle=True)[0]

    #filters = ['f160w','f814w', 'f110w', 'f098m', 'f140w','f125w','f105w','f606w', 'f475w']

    filt_dict = FilterDict()
    filt_dict.meta['label'] = label
    filt_dict.meta['ra'] = ra
    filt_dict.meta['dec'] = dec
    filt_dict.meta['size'] = size
    filt_dict.meta['master'] = master
    filt_dict.meta['parent'] = parent

    if filters is None:
        filters = list(groups.keys())

    has_filts = []
    lower_filters = [f.lower() for f in filters]
    for filt in lower_filters:
        if filt not in groups:
            continue

        visits = [copy.deepcopy(groups[filt])]
        #visits[0]['reference'] = 'CarlosGG/ak03_j1000p0228/Prep/ak03_j1000p0228-f160w_drz_sci.fits'

        visits[0]['product'] = label+'-'+filt

        if wcs is None:
            hdu = utils.make_wcsheader(ra=ra, dec=dec, size=size, pixscale=pixscale, get_hdu=True, theta=theta)

            h = hdu.header
        else:
            h = utils.to_header(wcs)

        if (filt[:2] in ['f0', 'f1', 'g1']) | (not half_optical_pixscale):
            #data = hdu.data
            pass
        else:
            for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
                h[k] *= 2

            h['CRPIX1'] -= 0.5
            h['CRPIX2'] -= 0.5

            for k in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
                if k in h:
                    h[k] /= 2

            #data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)

        #pyfits.PrimaryHDU(header=h, data=data).writeto('ref.fits', overwrite=True, output_verify='fix')
        #visits[0]['reference'] = 'ref.fits'

        print('\n\n###\nMake filter: {0}'.format(filt))

        if (filt.upper() in ['F105W', 'F110W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
            clean_i = False
        else:
            clean_i = remove

        status = utils.drizzle_from_visit(visits[0], h, pixfrac=pixfrac, kernel=kernel, clean=clean_i, include_saturated=include_saturated, skip=skip, dryrun=dryrun)

        if dryrun:
            filt_dict[filt] = status
            continue

        elif status is not None:
            sci, wht, outh, filt_dict[filt] = status

            if subtract_median:
                #med = np.median(sci[sci != 0])
                try:
                    un_data = np.unique(sci[(sci != 0) & np.isfinite(sci)])
                    med = utils.mode_statistic(un_data)
                except:
                    med = 0.

                if not np.isfinite(med):
                    med = 0.

                print('\n\nMedian {0} = {1:.3f}\n\n'.format(filt, med))
                outh['IMGMED'] = (med, 'Median subtracted from the image')
            else:
                med = 0.
                outh['IMGMED'] = (0., 'Median subtracted from the image')

            pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, filt),
                           data=sci, header=outh, overwrite=True,
                           output_verify='fix')

            pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, filt),
                           data=wht, header=outh, overwrite=True,
                           output_verify='fix')

            has_filts.append(filt)

            if (filt.upper() in ['F105W', 'F110W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
                from grizli.galfit.psf import DrizzlePSF

                hdu = pyfits.open('{0}-{1}_drz_sci.fits'.format(label, filt),
                                  mode='update')

                flt_files = []  # visits[0]['files']
                for i in range(1, 10000):
                    key = 'FLT{0:05d}'.format(i)
                    if key not in hdu[0].header:
                        break

                    flt_files.append(hdu[0].header[key])

                try:

                    dp = DrizzlePSF(flt_files=flt_files, driz_hdu=hdu[0])

                    psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
                                     dec=dp.driz_wcs.wcs.crval[1],
                                     filter=filt.upper(),
                                     pixfrac=dp.driz_header['PIXFRAC'],
                                     kernel=dp.driz_header['KERNEL'],
                                     wcs_slice=dp.driz_wcs,
                                     get_extended=True,
                                     verbose=False, get_weight=False)

                    psf[1].header['EXTNAME'] = 'PSF'
                    #psf[1].header['EXTVER'] = filt
                    hdu.append(psf[1])
                    hdu.flush()

                except:
                    pass

        if remove:
            os.system('rm *_fl*fits')

    # Dry run, just return dictionary of the found exposure files
    if dryrun:
        return filt_dict

    # Nothing found
    if len(has_filts) == 0:
        return []

    if combine_similar_filters:
        combine_filters(label=label)

    if rgb_params:
        #auto_script.field_rgb(root=label, HOME_PATH=None, filters=has_filts, **rgb_params)
        show_all_thumbnails(label=label, thumb_height=thumb_height, scale_ab=scale_ab, close=True, rgb_params=rgb_params, filters=show_filters)

    if (single_output != 0):
        # Concatenate into a single FITS file
        files = glob.glob('{0}-f*_dr[cz]_sci.fits'.format(label))
        files.sort()

        if combine_similar_filters:
            comb_files = glob.glob('{0}-[a-eg-z]*_dr[cz]_sci.fits'.format(label))
            comb_files.sort()
            files += comb_files

        hdul = None
        for file in files:
            hdu_i = pyfits.open(file)
            hdu_i[0].header['EXTNAME'] = 'SCI'
            if 'NCOMBINE' in hdu_i[0].header:
                if hdu_i[0].header['NCOMBINE'] <= single_output:
                    continue

                filt_i = file.split('-')[-1].split('_dr')[0]
            else:
                filt_i = utils.get_hst_filter(hdu_i[0].header)

            for h in hdu_i:
                h.header['EXTVER'] = filt_i
                if hdul is None:
                    hdul = pyfits.HDUList([h])
                else:
                    hdul.append(h)

            print('Add to {0}.thumb.fits: {1}'.format(label, file))

            # Weight
            hdu_i = pyfits.open(file.replace('_sci', '_wht'))
            hdu_i[0].header['EXTNAME'] = 'WHT'
            for h in hdu_i:
                h.header['EXTVER'] = filt_i
                if hdul is None:
                    hdul = pyfits.HDUList([h])
                else:
                    hdul.append(h)

        hdul.writeto('{0}.thumb.fits'.format(label), overwrite=True,
                     output_verify='fix')

        for file in files:
            for f in [file, file.replace('_sci', '_wht')]:
                if os.path.exists(f):
                    print('Remove {0}'.format(f))
                    os.remove(f)

    # Segmentation figure
    thumb_file = '{0}.thumb.fits'.format(label)
    if (make_segmentation_figure) & (os.path.exists(thumb_file)) & (aws_prep_dir is not None):

        print('Make segmentation figure')

        # Fetch segmentation image and catalog
        s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
        s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
        s3_file = '{0}_visits.npy'.format(prep_root)

        has_seg_files = True
        seg_files = ['{0}-ir_seg.fits.gz'.format(prep_root),
                     '{0}_phot.fits'.format(prep_root)]

        for s3_file in seg_files:
            if not os.path.exists(s3_file):
                remote_file = os.path.join(s3_prep_path, s3_file)
                try:
                    print('Fetch {0}'.format(remote_file))
                    prep_bkt.download_file(remote_file, s3_file,
                                   ExtraArgs={"RequestPayer": "requester"})
                except:
                    has_seg_files = False
                    print('Make segmentation figure failed: {0}'.format(remote_file))
                    break

        if has_seg_files:
            s3_cat = utils.read_catalog(seg_files[1])
            segmentation_figure(label, s3_cat, seg_files[0])

    if aws_bucket:
        #aws_bucket = 's3://grizli-cosmos/CutoutProducts/'
        #aws_bucket = 's3://grizli/CutoutProducts/'

        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket(aws_bucket.split("/")[2])
        aws_path = '/'.join(aws_bucket.split("/")[3:])

        if sync_fits:
            files = glob.glob('{0}*'.format(label))
        else:
            files = glob.glob('{0}*png'.format(label))

        for file in files:
            print('{0} -> {1}'.format(file, aws_bucket))
            bkt.upload_file(file, '{0}/{1}'.format(aws_path, file).replace('//', '/'), ExtraArgs={'ACL': 'public-read'})

        #os.system('aws s3 sync --exclude "*" --include "{0}*" ./ {1} --acl public-read'.format(label, aws_bucket))

        #os.system("""echo "<pre>" > index.html; aws s3 ls AWSBUCKETX --human-readable | sort -k 1 -k 2 | grep -v index | awk '{printf("%s %s",$1, $2); printf(" %6s %s ", $3, $4); print "<a href="$5">"$5"</a>"}'>> index.html; aws s3 cp index.html AWSBUCKETX --acl public-read""".replace('AWSBUCKETX', aws_bucket))

    return has_filts
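A usage sketch with the example values given in the docstring; it assumes the grizli-cosmos-v2 bucket is reachable with requester-pays AWS credentials, and sets aws_bucket=None to keep products local:

# Usage sketch (coordinates from the docstring example above)
has_filts = drizzle_images(label='cp561356', ra=150.208875,
                           dec=1.850241667, size=40,
                           filters=['f160w', 'f814w', 'f140w', 'f125w',
                                    'f105w', 'f606w', 'f475w'],
                           master='grizli-cosmos-v2', aws_bucket=None)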
Example #6
def apply_catalog_corrections(root,
                              total_flux='flux_auto',
                              auto_corr=True,
                              get_external_photometry=False,
                              external_limits=3,
                              external_sys_err=0.3,
                              external_timeout=300,
                              aperture_indices='all',
                              suffix='_apcorr',
                              verbose=True,
                              apply_background=True):
    """
    Aperture and background corrections to photometric catalog
    """
    import os
    import eazy
    import numpy as np

    from grizli import utils
    import mastquery.utils

    cat = utils.read_catalog('{0}_phot.fits'.format(root))
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())

    if get_external_photometry:
        print('Get external photometry from Vizier')
        try:
            ext = get_external_catalog(cat,
                                       external_limits=external_limits,
                                       timeout=external_timeout,
                                       sys_err=external_sys_err)
            for c in ext.colnames:
                if c not in cat.colnames:
                    cat[c] = ext[c]

            for k in ext.meta:
                cat.meta[k] = ext.meta[k]
        except:
            print(' - External catalog FAILED')
            pass

    # Fix: Take flux_auto when flag==0, flux otherwise
    if (total_flux == 'flux_auto_fix') & (total_flux not in cat.colnames):
        flux = cat['flux_auto'] * 1.
        flagged = (cat['flag'] > 0)
        flux[flagged] = cat['flux'][flagged]
        cat['flux_auto_fix'] = flux * 1.

    # Additional auto correction

    cat.meta['TOTALCOL'] = total_flux, 'Column for total flux'
    #cat.meta['HASTOT'] = (auto_corr &  ('tot_corr' in cat.colnames), 'Catalog has full total flux')

    apcorr = {}
    for NAPER in range(100):
        if 'APER_{0}'.format(NAPER) not in cat.meta:
            break

    if aperture_indices == 'all':
        aperture_indices = range(NAPER)

    for i in aperture_indices:

        if verbose:
            print('Compute aperture corrections: i={0}, D={1:.2f}" aperture'.
                  format(i, cat.meta['ASEC_{0}'.format(i)]))

        if 'flux_aper_{0}'.format(i) in cat.colnames:
            cat['apcorr_{0}'.format(
                i)] = cat[total_flux] / cat['flux_aper_{0}'.format(i)]
            for f in filters:
                bkgc = '{0}_bkg_aper_{1}'.format(f, i)
                # if (bkgc in cat.colnames) & apply_background:
                #     bkg = cat[bkgc]
                # else:
                #     bkg = 0.

                # background already subtracted from flux columns
                bkg = 0.

                cat['{0}_corr_{1}'.format(
                    f, i)] = (cat['{0}_flux_aper_{1}'.format(f, i)] -
                              bkg) * cat['apcorr_{0}'.format(i)]
                cat['{0}_ecorr_{1}'.format(
                    f, i)] = cat['{0}_fluxerr_aper_{1}'.format(
                        f, i)] * cat['apcorr_{0}'.format(i)]

                # mask_thresh = np.percentile(cat['{0}_mask_aper_{1}'.format(f, i)], 95)
                aper_area = np.pi * (cat.meta['APER_{0}'.format(i)] / 2)**2
                mask_thresh = aper_area

                bad = cat['{0}_mask_aper_{1}'.format(f, i)] > 0.2 * mask_thresh
                cat['{0}_corr_{1}'.format(f, i)][bad] = -99
                cat['{0}_ecorr_{1}'.format(f, i)][bad] = -99

                tot_col = '{0}_tot_corr'.format(f.lower())

                if auto_corr and (tot_col in cat.colnames):
                    cat['{0}_tot_{1}'.format(
                        f,
                        i)] = cat['{0}_corr_{1}'.format(f, i)] * cat[tot_col]
                    cat['{0}_etot_{1}'.format(
                        f,
                        i)] = cat['{0}_ecorr_{1}'.format(f, i)] * cat[tot_col]

                    cat['{0}_tot_{1}'.format(f, i)][bad] = -99
                    cat['{0}_etot_{1}'.format(f, i)][bad] = -99

    if 'id' not in cat.colnames:
        cat.rename_column('number', 'id')

    cat['z_spec'] = cat['id'] * 0. - 1

    # Spurious sources, sklearn SVM model trained for a single field
    morph_model = os.path.join(os.path.dirname(utils.__file__),
                               'data/sep_catalog_junk.pkl')

    # Only apply if detection pixel scale is 0.06"
    if 'ASEC_0' in cat.meta:
        try:
            detection_pscale = cat.meta['ASEC_0'][0] / cat.meta['APER_0'][0]
        except:
            detection_pscale = cat.meta['ASEC_0'] / cat.meta['APER_0']

        run_morph_model = np.isclose(detection_pscale, 0.06, atol=0.005)
    else:
        run_morph_model = True

    if os.path.exists(morph_model) & run_morph_model:
        if verbose:
            print('Apply morphological validity class')

        from sklearn.externals import joblib
        clf = joblib.load(morph_model)
        X = np.hstack([[cat['peak'] / cat['flux'],
                        cat['cpeak'] / cat['peak']]]).T

        # Predict labels, which were generated for
        #    bad_bright, bad_faint, stars, big_galaxies, small_galaxies
        pred = clf.predict_proba(X)

        # Should be >~ 0.9 for valid sources, stars & galaxies in "ir" image
        cat['class_valid'] = pred[:, -3:].sum(axis=1)
        cat['class_valid'].format = '.2f'

    cat['dummy_err'] = 10**(-0.4 * (8 - 23.9))
    cat['dummy_flux'] = cat[total_flux]  # detection band

    if suffix:
        if verbose:
            print('Write {0}_phot{1}.fits'.format(root, suffix))

        cat.write('{0}_phot{1}.fits'.format(root, suffix), overwrite=True)

    return cat
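A usage sketch (root illustrative); this writes {root}_phot_apcorr.fits next to the input {root}_phot.fits:

# Usage sketch: apply aperture corrections and write the _apcorr catalog
cat = apply_catalog_corrections('j023507-040202',
                                total_flux='flux_auto',
                                suffix='_apcorr')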
Example #7
def eazy_photoz(root,
                force=False,
                object_only=True,
                apply_background=True,
                aper_ix=1,
                apply_prior=False,
                beta_prior=True,
                get_external_photometry=False,
                external_limits=3,
                external_sys_err=0.3,
                external_timeout=300,
                sys_err=0.05,
                z_step=0.01,
                z_min=0.01,
                z_max=12,
                total_flux='flux_auto',
                auto_corr=True,
                compute_residuals=False,
                dummy_prior=False,
                extra_rf_filters=[],
                quiet=True):

    import os
    import eazy
    import numpy as np

    from grizli import utils
    import mastquery.utils

    if (os.path.exists('{0}.eazypy.self.npy'.format(root))) & (not force):
        self = np.load('{0}.eazypy.self.npy'.format(root),
                       allow_pickle=True)[0]
        zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))
        cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
        return self, cat, zout

    trans = {
        'f098m': 201,
        'f105w': 202,
        'f110w': 241,
        'f125w': 203,
        'f140w': 204,
        'f160w': 205,
        'f435w': 233,
        'f475w': 234,
        'f555w': 235,
        'f606w': 236,
        'f625w': 237,
        'f775w': 238,
        'f814w': 239,
        'f850lp': 240,
        'f702w': 15,
        'f600lpu': 243,
        'f225wu': 207,
        'f275wu': 208,
        'f336w': 209,
        'f350lpu': 339,
        'f438wu': 211,
        'f475wu': 212,
        'f475xu': 242,
        'f555wu': 213,
        'f606wu': 214,
        'f625wu': 215,
        'f775wu': 216,
        'f814wu': 217,
        'f390wu': 210,
        'ch1': 18,
        'ch2': 19
    }

    #trans.pop('f814w')

    print('Apply catalog corrections')
    apply_catalog_corrections(root, suffix='_apcorr')

    cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())

    # Translate
    fp = open('zphot.translate', 'w')
    for f in filters:
        if f in trans:
            fp.write('{0}_tot_{1} F{2}\n'.format(f, aper_ix, trans[f]))
            fp.write('{0}_etot_{1} E{2}\n'.format(f, aper_ix, trans[f]))

    fp.write('irac_ch1_flux F18\n')
    fp.write('irac_ch1_err  E18\n')

    fp.write('irac_ch2_flux F19\n')
    fp.write('irac_ch2_err  E19\n')

    # For zeropoint
    if dummy_prior:
        fp.write('dummy_flux F205x\n')
        fp.write('dummy_err  E205x\n')

    fp.close()
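
    # The translate file now pairs the total-flux columns with eazy filter
    # numbers and adds the fixed IRAC entries; e.g. for f160w with aper_ix=1:
    #     f160w_tot_1 F205
    #     f160w_etot_1 E205
    #     irac_ch1_flux F18
    #     irac_ch1_err  E18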

    params = {}
    params['CATALOG_FILE'] = '{0}_phot_apcorr.fits'.format(root)
    params['Z_STEP'] = z_step
    params['MAIN_OUTPUT_FILE'] = '{0}.eazypy'.format(root)

    params['Z_MAX'] = z_max
    params['MW_EBV'] = mastquery.utils.get_irsa_dust(cat['ra'].mean(),
                                                     cat['dec'].mean())
    params['PRIOR_ABZP'] = 23.9

    params['SYS_ERR'] = sys_err
    params['CAT_HAS_EXTCORR'] = False

    # Pick prior filter, starting from reddest
    for f in [
            'f435w', 'f606w', 'f814w', 'f105w', 'f110w', 'f125w', 'f140w',
            'f160w'
    ][::-1]:
        if f in filters:
            if dummy_prior:
                params['PRIOR_FILTER'] = 'dummy_flux'
            else:
                params['PRIOR_FILTER'] = trans[f]

            mag = 23.9 - 2.5 * np.log10(cat['{0}_corr_{1}'.format(f, aper_ix)])
            break
    #
    if os.path.exists('templates/fsps_full/'
                      'tweak_fsps_QSF_11_v3_noRed.param.fits'):
        params['TEMPLATES_FILE'] = ('templates/fsps_full/'
                                    'tweak_fsps_QSF_11_v3_noRed.param')
    else:
        params['TEMPLATES_FILE'] = ('templates/fsps_full/'
                                    'tweak_fsps_QSF_12_v3.param')

    zpfile = None
    load_products = False

    if (not os.path.exists('FILTER.RES.latest')
            or not os.path.exists('templates')):
        try:
            # should work with eazy-py >= 0.2.0
            eazy.symlink_eazy_inputs(path=None)
        except:
            print("""
The filter file `FILTER.RES.latest` and `templates` directory were not
found in the working directory and the automatic command to retrieve them 
failed: 
    
    >>> import eazy; eazy.symlink_eazy_inputs(path=None)  

Run it with `path` pointing to the location of the `eazy-photoz` repository."""
                  )
            return False

    self = eazy.photoz.PhotoZ(param_file=None,
                              translate_file='zphot.translate',
                              zeropoint_file=zpfile,
                              params=params,
                              load_prior=True,
                              load_products=load_products)

    if quiet:
        self.param.params['VERBOSITY'] = 1.

    if object_only:
        return self

    idx = np.arange(self.NOBJ)

    #sample = (mag < 27) #& (self.cat['star_flag'] != 1)
    #sample |= (self.cat['z_spec'] > 0)
    sample = np.isfinite(self.cat['id'])  #mag)

    for iter in range(1 + (get_external_photometry & compute_residuals) * 1):
        self.fit_parallel(idx[sample], n_proc=10)
        if compute_residuals:
            self.error_residuals()

    self.standard_output(prior=apply_prior,
                         beta_prior=beta_prior,
                         extra_rf_filters=extra_rf_filters)

    zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))

    np.save('{0}.eazypy.self.npy'.format(root), [self])

    return self, cat, zout
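In this version, a repeated call with the default force=False short-circuits and simply loads the cached products from an earlier full run:

# Usage sketch: a second call returns the saved .eazypy products
self, cat, zout = eazy_photoz('j023507-040202', object_only=False)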
Example #8
def irac_mosaics(root='j000308m3303', home='/GrizliImaging/', pixfrac=0.2,
                 kernel='square', initial_pix=1.0, final_pix=0.5,
                 pulldown_mag=15.2, sync_xbcd=True, skip_fetch=False,
                 radec=None, mosaic_pad=2.5, drizzle_ref_file='',
                 run_alignment=True, assume_close=True, bucket='grizli-v1',
                 aor_query='r*', mips_ext='[_e]bcd.fits',
                 channels=['ch1', 'ch2', 'ch3', 'ch4', 'mips1'],
                 drz_query='r*', sync_results=True, ref_seg=None,
                 min_frame={'irac': 5, 'mips': 1.0}, med_max_size=500e6,
                 stop_at='', make_psf=True, **kwargs):
    """
    stop_at: preprocess, make_compact
    
    """
    
    import os
    import glob
    import time

    import numpy as np
    import matplotlib.pyplot as plt

    import astropy.io.fits as pyfits
    import astropy.wcs as pywcs
    from matplotlib.path import Path

    # The `selem` keyword used at the binary_dilation call below implies the
    # scikit-image version of the function
    from skimage.morphology import binary_dilation
    from drizzlepac import ablot

    from grizli import utils

    from . import irac
    from .utils import get_wcslist, fetch_irac
    
    PATH = os.path.join(home, root)
    try:
        os.mkdir(PATH)
    except:
        pass

    os.chdir(PATH)
        
    if not skip_fetch:
        # Fetch IRAC bcds
        if not os.path.exists(f'{root}_ipac.fits'):
            os.system(f'wget https://s3.amazonaws.com/{bucket}/IRAC/{root}_ipac.fits')
    
        res = fetch_irac(root=root, path='./', channels=channels)
        
        if res in [False, None]:
            # Nothing to do
            make_html(root, bucket=bucket)

            print(f'### Done: \n https://s3.amazonaws.com/{bucket}/Pipeline/{root}/IRAC/{root}.irac.html')

            utils.log_comment(f'/tmp/{root}.success', 'Done!', 
                              verbose=True, show_date=True)
            return True
            
    # Sync CHArGE HST images
    os.system(f'aws s3 sync s3://{bucket}/Pipeline/{root}/Prep/ ./ '
              f' --exclude "*" --include "{root}*seg.fits*"'
              f' --include "{root}-ir_drz*fits*"'
              f' --include "{root}*psf.fits*"'
              f' --include "{root}-f[01]*_drz*fits.gz"'
              f' --include "{root}*phot.fits"')
    
    # Drizzle properties of the preliminary mosaic
    #pixfrac, pix, kernel = 0.2, 1.0, 'square'       
    
    # Define an output WCS aligned in pixel phase to the HST mosaic

    if not os.path.exists('ref_hdu.fits'):
        wcslist = get_wcslist(skip=-500)
        out_hdu = utils.make_maximal_wcs(wcslist, pixel_scale=initial_pix, theta=0, pad=5, get_hdu=True, verbose=True)

        # Make sure pixels align
        ref_file = glob.glob('{0}-f[01]*_drz_sci.fits*'.format(root))
        if len(ref_file) == 0:
            os.system(f'aws s3 sync s3://{bucket}/Pipeline/{root}/Prep/ ./ '
                      f' --exclude "*"'
                      f' --include "{root}-f[678]*_dr*fits.gz"')
            
            ref_file = glob.glob('{0}-f[678]*_dr*_sci.fits*'.format(root))
        
        ref_file = ref_file[-1]

        print(f'\nHST reference image: {ref_file}\n')

        ref_hdu = pyfits.open(ref_file)[0].header
        ref_filter = utils.get_hst_filter(ref_hdu).lower()

        ref_wcs = pywcs.WCS(ref_hdu)
        ref_rd = ref_wcs.all_pix2world(np.array([[-0.5, -0.5]]), 0).flatten()
        target_phase = np.array([0.5, 0.5])#/(pix/0.1)
        for k in ['RADESYS', 'LATPOLE', 'LONPOLE']:
            out_hdu.header[k] = ref_hdu[k]

        # Shift CRVAL to same tangent point
        out_wcs = pywcs.WCS(out_hdu.header)
        out_xy = out_wcs.all_world2pix(np.array([ref_wcs.wcs.crval]), 1).flatten()
        out_hdu.header['CRVAL1'], out_hdu.header['CRVAL2'] = tuple(ref_wcs.wcs.crval)
        out_hdu.header['CRPIX1'], out_hdu.header['CRPIX2'] = tuple(out_xy)

        # Align integer pixel phase
        out_wcs = pywcs.WCS(out_hdu.header)
        out_xy = out_wcs.all_world2pix(np.array([ref_rd]), 0).flatten()
        xy_phase = out_xy - np.floor(out_xy)
        new_crpix = out_wcs.wcs.crpix - (xy_phase - target_phase)
        out_hdu.header['CRPIX1'], out_hdu.header['CRPIX2'] = tuple(new_crpix)
        out_wcs = pywcs.WCS(out_hdu.header)

        out_hdu.writeto('ref_hdu.fits', output_verify='fix')

    else:
        out_hdu = pyfits.open('ref_hdu.fits')[1]
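
    # `out_hdu` now carries a WCS whose pixel grid is phase-aligned to the
    # HST mosaic; a quick sanity check (sketch, names from the branch above):
    #     chk_wcs = pywcs.WCS(out_hdu.header)
    #     xy = chk_wcs.all_world2pix(np.array([ref_rd]), 0).flatten()
    #     print(xy - np.floor(xy))  # ~ [0.5, 0.5] = target_phase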
    
    ########
    
    files = []
    for ch in channels:
        if 'mips' in ch:
            mc = ch.replace('mips','ch')
            files += glob.glob(f'{aor_query}/{mc}/bcd/SPITZER_M*{mips_ext}')
            files += glob.glob(f'{aor_query}/{mc}/bcd/SPITZER_M*xbcd.fits.gz')
        else:
            files += glob.glob(f'{aor_query}/{ch}/bcd/SPITZER_I*cbcd.fits')
            files += glob.glob(f'{aor_query}/{ch}/bcd/SPITZER_I*xbcd.fits.gz')
            
    files.sort()

    roots = np.array([file.split('/')[0] for file in files])
    with_channels = np.array([file.split('_')[1] for file in files])
    all_roots = np.array(['{0}-{1}'.format(r, c.replace('I','ch').replace('M', 'mips')) for r, c in zip(roots, with_channels)])

    tab = {'aor':[], 'N':[], 'channel':[]}
    for r in np.unique(all_roots):
        tab['aor'].append(r.split('-')[0])
        tab['N'].append((all_roots == r).sum())
        tab['channel'].append(r.split('-')[1])

    aors = utils.GTable(tab)
    print(aors)
    
    ########
    SKIP = True          # Don't regenerate finished files
    delete_group = False # Delete intermediate products from memory
    zip_outputs = False    # GZip intermediate products

    aors_ch = {}
    
    ########
    # Process mosaics by AOR
    # Process in groups, helps for fields like HFF with dozens/hundreds of AORs!
    for ch in channels:
            
        aor = aors[(aors['channel'] == ch) & (aors['N'] > 5)]
        if len(aor) == 0:
            continue

        #aors_ch[ch] = []

        if ch in ['ch1', 'ch2']:
            NPER, instrument = 500, 'irac'
        elif ch in ['ch3', 'ch4']:
            NPER, instrument = 500, 'irac'
        elif ch in ['mips1']:
            NPER, instrument = 400, 'mips'
        
        min_frametime = min_frame[instrument]
        
        nsort = np.cumsum(aor['N']/NPER)
        NGROUP = int(np.ceil(nsort.max()))

        count = 0

        for g in range(NGROUP):
            root_i = root+'-{0:02d}'.format(g)

            gsel = (nsort > g) & (nsort <= g+1)
            aor_ids = list(aor['aor'][gsel])
            print('{0}-{1}   N_AOR = {2:>2d}  N_EXP = {3:>4d}'.format(root_i, ch,  len(aor_ids), aor['N'][gsel].sum()))
            count += gsel.sum()

            files = glob.glob('{0}-{1}*'.format(root_i, ch))
            if (len(files) > 0) & (SKIP): 
                print('Skip {0}-{1}'.format(root_i, ch))
                continue
            
            with open('{0}-{1}.log'.format(root_i, ch),'w') as fp:
                fp.write(time.ctime())
                
            # Do internal alignment to GAIA.  
            # Otherwise, set `radec` to the name of a file that has two columns with 
            # reference ra/dec.
            #radec = None 

            # Pipeline
            if instrument == 'mips':
                aors_ch[ch] = irac.process_all(
                    channel=ch.replace('mips', 'ch'),
                    output_root=root_i, driz_scale=initial_pix,
                    kernel=kernel, pixfrac=pixfrac,
                    wcslist=None, pad=0, out_hdu=out_hdu,
                    aor_ids=aor_ids, flat_background=False,
                    two_pass=True, min_frametime=min_frametime,
                    instrument=instrument, align_threshold=0.15,
                    radec=radec, run_alignment=False,
                    mips_ext=mips_ext, ref_seg=ref_seg,
                    global_mask=root+'_mask.reg')
            else:
                aors_ch[ch] = irac.process_all(
                    channel=ch, output_root=root_i,
                    driz_scale=initial_pix,
                    kernel=kernel, pixfrac=pixfrac,
                    wcslist=None, pad=0, out_hdu=out_hdu,
                    aor_ids=aor_ids, flat_background=False,
                    two_pass=True, min_frametime=min_frametime,
                    instrument=instrument, radec=radec,
                    run_alignment=run_alignment,
                    assume_close=assume_close,
                    ref_seg=ref_seg,
                    global_mask=root+'_mask.reg',
                    med_max_size=med_max_size)

            if len(aors_ch[ch]) == 0:
                continue

            # PSFs
            plt.ioff()

            if (instrument != 'mips') & make_psf:
                ch_num = int(ch[-1])
                segmask=True

                # psf_size=20
                # for p in [0.1, final_pix]:
                #     irac.mosaic_psf(output_root=root_i, target_pix=p, channel=ch_num, aors=aors_ch[ch], kernel=kernel, pixfrac=pixfrac, size=psf_size, native_orientation=False, instrument=instrument, subtract_background=False, segmentation_mask=segmask, max_R=10)
                #     plt.close('all')

                psf_size=30
                p = 0.1
                irac.mosaic_psf(output_root=root_i, target_pix=p,
                                channel=ch_num, aors=aors_ch[ch],
                                kernel=kernel, pixfrac=pixfrac,
                                size=psf_size, native_orientation=True,
                                subtract_background=False,
                                segmentation_mask=segmask, max_R=10)

                plt.close('all')

            if delete_group:
                del(aors_ch[ch])

            print('Done {0}-{1}, gzip products'.format(root_i, ch))

            if zip_outputs:
                os.system('gzip {0}*-{1}_drz*fits'.format(root_i, ch))
        
        # PSFs
        if (instrument != 'mips') & make_psf:
            # Average PSF
            p = 0.1
            files = glob.glob('*{0}-{1:.1f}*psfr.fits'.format(ch, p))
            if len(files) == 0:
                continue
                
            files.sort()
            avg = None
            for file in files: 
                im = pyfits.open(file)
                if avg is None:
                    wht = im[0].data != 0
                    avg = im[0].data*wht
                else:
                    wht_i = im[0].data != 0
                    avg += im[0].data*wht_i
                    wht += wht_i
                
                im.close()
                
            avg = avg/wht
            avg[wht == 0] = 0

            # Window
            from photutils import (HanningWindow, TukeyWindow, 
                                   CosineBellWindow,
                                   SplitCosineBellWindow, TopHatWindow)

            coswindow = CosineBellWindow(alpha=1)
            avg *= coswindow(avg.shape)**0.05
            avg /= avg.sum()

            pyfits.writeto('{0}-{1}-{2:0.1f}.psfr_avg.fits'.format(root, ch, p),
                           data=avg, header=im[0].header, overwrite=True)
    
    ####
    ## Show the initial product
    plt.ioff()
    for i in range(10):
        files = glob.glob(f'{root}-{i:02d}-ch*sci.fits')
        if len(files) > 0:
            break
            
    files.sort()
    
    if len(files) == 1:
        subs = 1,1
        fs = [7,7]
    elif len(files) == 2:
        subs = 1,2
        fs = [14,7]
    elif len(files) == 3:
        subs = 2,2
        fs = [14,14]
    else:
        subs = 2,2
        fs = [14,14]
        
    fig = plt.figure(figsize=fs)
    for i, file in enumerate(files[:4]):
        im = pyfits.open(file)
        print('{0} {1} {2:.1f} s'.format(file, im[0].header['FILTER'], im[0].header['EXPTIME']))
        ax = fig.add_subplot(subs[0], subs[1], 1+i)
        ax.imshow(im[0].data, vmin=-0.1, vmax=1, cmap='gray_r', origin='lower')
        ax.text(0.05, 0.95, file, ha='left', va='top', color='k', 
                transform=ax.transAxes)
        
        im.close()
        
    if len(files) > 1:
        fig.axes[1].set_yticklabels([])
    
    if len(files) > 2:
        fig.axes[0].set_xticklabels([])
        fig.axes[1].set_xticklabels([])
    
    if len(files) > 3:
        fig.axes[3].set_yticklabels([])
        
    fig.tight_layout(pad=0.5)
    fig.savefig(f'{root}.init.png')
    plt.close('all')
    
    if stop_at == 'preprocess':
        return True
        
    #######
    # Make more compact individual exposures and clean directories
    wfiles = []
    for ch in channels:
        if 'mips' in ch:
            chq = ch.replace('mips','ch')
            wfiles += glob.glob(f'{aor_query}/{chq}/bcd/SPITZER_M*wcs.fits')
        else:
            wfiles += glob.glob(f'{aor_query}/{ch}/bcd/SPITZER_I*wcs.fits')

    #wfiles = glob.glob('r*/*/bcd/*_I[1-4]_*wcs.fits')
    #wfiles += glob.glob('r*/*/bcd/*_M[1-4]_*wcs.fits')
    wfiles.sort()

    for wcsfile in wfiles:
        outfile = wcsfile.replace('_wcs.fits', '_xbcd.fits.gz')
        if os.path.exists(outfile):
            print(outfile)
        else:
            irac.combine_products(wcsfile)
            print('Run: ', outfile)

        if os.path.exists(outfile):
            remove_files = glob.glob('{0}*fits'.format(wcsfile.split('_wcs')[0]))
            for f in remove_files:
                print('   rm ', f)
                os.remove(f)
 
    if stop_at == 'make_compact':
        return True
                                   
    #############
    # Drizzle final mosaics
    # Make final mosaic a bit bigger than the HST image
    pad = mosaic_pad

    # Pixel scale of final mosaic.
    # Don't make too small if not many dithers available as in this example.
    # But for well-sampled mosaics like RELICS / HFF, can push this to perhaps 0.3" / pix
    pixscale = final_pix #0.5

    # Again, if have many dithers maybe can use more aggressive drizzle parameters,
    # like a 'point' kernel or smaller pixfrac (a 'point' kernel is pixfrac=0)
    #kernel, pixfrac = 'square', 0.2

    # Correction for bad columns near bright stars
    #pulldown_mag = 15.2 

    ##############
    # Dilation for CR rejection
    dil = np.ones((3,3))
    driz_cr = [7, 4]
    blot_interp = 'poly5'
    bright_fmax = 0.5
    
    ### Drizzle
    for ch in channels: #[:2]:
        ###########
        # Files and reference image for extra CR rejection
        if ch == 'mips1':
            files = glob.glob('{0}/ch1/bcd/SPITZER_M1_*xbcd.fits*'.format(drz_query, ch))
            files.sort()
            pulldown_mag = -10
            pixscale = 1.
            kernel = 'point'
        else:
            files = glob.glob('{0}/{1}/bcd/*_I?_*xbcd.fits*'.format(drz_query, ch))
            files.sort()

        #ref = pyfits.open('{0}-00-{1}_drz_sci.fits'.format(root, ch))
        #ref_data = ref[0].data.astype(np.float32)

        ref_files = glob.glob(f'{root}-??-{ch}*sci.fits')
        if len(ref_files) == 0:
            continue

        num = None
        for ref_file in ref_files:
            ref = pyfits.open(ref_file)
            wht = pyfits.open(ref_file.replace('_sci.fits', '_wht.fits'))
            if num is None:
                num = ref[0].data*wht[0].data
                den = wht[0].data
            else:
                num += ref[0].data*wht[0].data
                den += wht[0].data

        ref_data = (num/den).astype(np.float32)
        ref_data[den <= 0] = 0

        ref_wcs = pywcs.WCS(ref[0].header, relax=True) 
        ref_wcs.pscale = utils.get_wcs_pscale(ref_wcs) 
        if (not hasattr(ref_wcs, '_naxis1')) & hasattr(ref_wcs, '_naxis'):
            ref_wcs._naxis1, ref_wcs._naxis2 = ref_wcs._naxis

        ##############
        # Output WCS based on HST footprint
        if drizzle_ref_file == '':
            try:
                hst_im = pyfits.open(glob.glob('{0}-f[01]*_drz_sci.fits*'.format(root))[-1])
            except:
                hst_im = pyfits.open(glob.glob('{0}-f[578]*_dr*sci.fits*'.format(root))[-1])
            
    
            hst_wcs = pywcs.WCS(hst_im[0])
            hst_wcs.pscale = utils.get_wcs_pscale(hst_wcs) 

            try:
                size = (np.round(np.array([hst_wcs._naxis1, hst_wcs._naxis2])*hst_wcs.pscale*pad/pixscale)*pixscale)
            except:
                size = (np.round(np.array([hst_wcs._naxis[0], hst_wcs._naxis[1]])*hst_wcs.pscale*pad/pixscale)*pixscale)
            
            hst_rd = hst_wcs.calc_footprint().mean(axis=0)
            _x = utils.make_wcsheader(ra=hst_rd[0], dec=hst_rd[1],
                                      size=size, 
                                      pixscale=pixscale, 
                                      get_hdu=False, theta=0)
            
            out_header, out_wcs = _x
        else:
            driz_ref_im = pyfits.open(drizzle_ref_file)
            out_wcs = pywcs.WCS(driz_ref_im[0].header, relax=True)
            out_wcs.pscale = utils.get_wcs_pscale(out_wcs) 
            
            out_header = utils.to_header(out_wcs)
        
        if (not hasattr(out_wcs, '_naxis1')) & hasattr(out_wcs, '_naxis'):
            out_wcs._naxis1, out_wcs._naxis2 = out_wcs._naxis
            
        ##############
        # Bright stars for pulldown correction
        cat_file = glob.glob(f'{root}-[0-9][0-9]-{ch}.cat.fits')[0]
        ph = utils.read_catalog(cat_file) 
        bright = (ph['mag_auto'] < pulldown_mag) # & (ph['flux_radius'] < 3)
        ph = ph[bright]

        ##############
        # Now do the drizzling
        yp, xp = np.indices((256, 256))
        orig_files = []

        out_header['DRIZ_CR0'] = driz_cr[0]
        out_header['DRIZ_CR1'] = driz_cr[1]
        out_header['KERNEL'] = kernel
        out_header['PIXFRAC'] = pixfrac
        out_header['NDRIZIM'] = 0
        out_header['EXPTIME'] = 0
        out_header['BUNIT'] = 'microJy'
        out_header['FILTER'] = ch

        med_root = 'xxx'
        N = len(files)

        for i, file in enumerate(files):#[:100]):

            print('{0}/{1} {2}'.format(i, N, file))

            if file in orig_files:
                continue

            im = pyfits.open(file)
            ivar = 1/im['CBUNC'].data**2    
            msk = (~np.isfinite(ivar)) | (~np.isfinite(im['CBCD'].data))
            im['CBCD'].data[msk] = 0
            ivar[msk] = 0

            wcs = pywcs.WCS(im['WCS'].header, relax=True)
            wcs.pscale = utils.get_wcs_pscale(wcs)
            if (not hasattr(wcs, '_naxis1')) & hasattr(wcs, '_naxis'):
                wcs._naxis1, wcs._naxis2 = wcs._naxis
            
            fp = Path(wcs.calc_footprint())

            med_root_i = im.filename().split('/')[0]
            if med_root != med_root_i:
                print('\n Read {0}-{1}_med.fits \n'.format(med_root_i, ch))
                med = pyfits.open('{0}-{1}_med.fits'.format(med_root_i, ch))
                med_data = med[0].data.astype(np.float32)
                med_root = med_root_i
                med.close()
                
                try:
                    gaia_rd = utils.read_catalog('{0}-{1}_gaia.radec'.format(med_root_i, ch))
                    ii, rr = gaia_rd.match_to_catalog_sky(ph)
                    gaia_rd = gaia_rd[ii][rr.value < 2]
                    gaia_pts = np.array([gaia_rd['ra'].data, 
                                         gaia_rd['dec'].data]).T
                except:
                    gaia_rd = []

            #data = im['CBCD'].data - aor_med[0].data

            # Change output units to uJy / pix
            if ch == 'mips1':
                # un = 1*u.MJy/u.sr
                # #to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(out_wcs.pscale**2)
                # to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(native_scale**2)
                to_ujy_px = 146.902690
            else:
                # native_scale = 1.223
                # un = 1*u.MJy/u.sr
                # #to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(out_wcs.pscale**2)
                # to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(native_scale**2)
                to_ujy_px = 35.17517196810
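            # The hard-coded values are 1 MJy/sr expressed as uJy per
            # native pixel: 23.5045 uJy/arcsec**2 times the native pixel
            # scale squared (2.50" for MIPS-24, ~1.223" for IRAC), per the
            # commented-out derivation above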

            blot_data = ablot.do_blot(ref_data, ref_wcs, wcs, 1, coeffs=True, 
                                      interp=blot_interp, 
                                      sinscl=1.0, stepsize=10, 
                                      wcsmap=None)/to_ujy_px

            # mask for bright stars
            eblot = 1-np.clip(blot_data, 0, bright_fmax)/bright_fmax

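            # Two-pass CR rejection: flag pixels that deviate from the
            # blotted reference by more than driz_cr[0] sigma, then grow
            # the mask into neighboring pixels that exceed the lower
            # driz_cr[1] threshold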
            # Initial CR
            clean = im[0].data - med_data - im['WCS'].header['PEDESTAL']
            dq = (clean - blot_data)*np.sqrt(ivar)*eblot > driz_cr[0]

            # Adjacent CRs
            dq_dil = binary_dilation(dq, selem=dil)
            dq |= ((clean - blot_data)*np.sqrt(ivar)*eblot > driz_cr[1]) & (dq_dil)

            # Very negative pixels
            dq |= clean*np.sqrt(ivar) < -4

            original_dq = im['WCS'].data - (im['WCS'].data & 1)
            dq |= original_dq > 0

            # Pulldown correction for bright stars
            if len(gaia_rd) > 0:       
                mat = fp.contains_points(gaia_pts) 
                if mat.sum() > 0:
                    xg, yg = wcs.all_world2pix(gaia_rd['ra'][mat], gaia_rd['dec'][mat], 0)
                    sh = dq.shape
                    mat = (xg > 0) & (xg < sh[1]) & (yg > 0) & (yg < sh[0])
                    if mat.sum() > 0:
                        for xi, yi in zip(xg[mat], yg[mat]):
                            dq |= (np.abs(xp-xi) < 2) & (np.abs(yp-yi) > 10)

            if i == 0:
                res = utils.drizzle_array_groups([clean], [ivar*(dq == 0)], [wcs], outputwcs=out_wcs, kernel=kernel, pixfrac=pixfrac, data=None, verbose=False)
                # Copy header keywords
                wcs_header = utils.to_header(wcs)
                for k in im[0].header:
                    if (k not in ['', 'HISTORY', 'COMMENT']) & (k not in out_header) & (k not in wcs_header):
                        out_header[k] = im[0].header[k]

            else:
                _ = utils.drizzle_array_groups([clean], [ivar*(dq == 0)], [wcs], outputwcs=out_wcs, kernel=kernel, pixfrac=pixfrac, data=res[:3], verbose=False)

            out_header['NDRIZIM'] += 1
            out_header['EXPTIME'] += im[0].header['EXPTIME']
            
            im.close()
            
        # Pixel scale factor for weights
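        # (the output weights are rescaled by the fourth power of the
        # pixel-scale ratio so they stay inverse-variance-like on the
        # output grid)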
        wht_scale = (out_wcs.pscale/wcs.pscale)**-4

        # Write final images
        pyfits.writeto('{0}-{1}_drz_sci.fits'.format(root, ch), data=res[0]*to_ujy_px, header=out_header, 
                       output_verify='fix', overwrite=True)
        pyfits.writeto('{0}-{1}_drz_wht.fits'.format(root, ch), data=res[1]*wht_scale/to_ujy_px**2, 
                       header=out_header, output_verify='fix', overwrite=True)
    
    ##########
    ## Show the final drizzled images
    plt.ioff()
    files = glob.glob(f'{root}-ch*sci.fits')
    files.sort()
    
    if len(files) == 1:
        subs = 1,1
        fs = [7,7]
    elif len(files) == 2:
        subs = 1,2
        fs = [14,7]
    elif len(files) == 3:
        subs = 2,2
        fs = [14,14]
    else:
        subs = 2,2
        fs = [14,14]
        
    fig = plt.figure(figsize=fs)
    for i, file in enumerate(files[:4]):
        im = pyfits.open(file)
        print('{0} {1} {2:.1f} s'.format(file, im[0].header['FILTER'], im[0].header['EXPTIME']))
        ax = fig.add_subplot(subs[0], subs[1], 1+i)
        scl = (final_pix/initial_pix)**2
        ax.imshow(im[0].data, vmin=-0.1*scl, vmax=1*scl, cmap='gray_r', origin='lower')
        ax.text(0.05, 0.95, file, ha='left', va='top', color='k', 
                transform=ax.transAxes)
        
        im.close()
        
    if len(files) > 1:
        fig.axes[1].set_yticklabels([])
    
    if len(files) > 2:
        fig.axes[0].set_xticklabels([])
        fig.axes[1].set_xticklabels([])
    
    if len(files) > 3:
        fig.axes[3].set_yticklabels([])
        
    fig.tight_layout(pad=0.5)
    fig.savefig(f'{root}.final.png')
    plt.close('all')
    
    if sync_results:
        print('gzip mosaics')
        os.system(f'gzip -f {root}-ch*_drz*fits {root}-mips*_drz*fits')
    
        ######## Sync
        ## Sync
        print(f's3://{bucket}/Pipeline/{root}/IRAC/')
    
        make_html(root, bucket=bucket)
    
        os.system(f'aws s3 sync ./ s3://{bucket}/Pipeline/{root}/IRAC/'
                  f' --exclude "*" --include "{root}-ch*drz*fits*"'
                  f' --include "{root}-mips*drz*fits*"'
                  f' --include "{root}.*png"'
                  ' --include "*-ch*psf*" --include "*log.fits"' 
                  ' --include "*wcs.[lp]*"'
                  ' --include "*html" --include "*fail*"'
                  ' --acl public-read')
    
        if sync_xbcd:
            aor_files = glob.glob('r*-ch*med.fits')
            for aor_file in aor_files:
                aor = aor_file.split('-ch')[0]
                os.system(f'aws s3 sync ./{aor}/ s3://{bucket}/IRAC/AORS/{aor}/ --exclude "*" --include "ch*/bcd/*xbcd.fits.gz" --acl public-read')
                os.system(f'aws s3 cp {aor_file} s3://{bucket}/IRAC/AORS/ --acl public-read')
                
    msg = f'### Done: \n    https://s3.amazonaws.com/{bucket}/Pipeline/{root}/IRAC/{root}.irac.html'
       
    utils.log_comment(f'/tmp/{root}.success', msg, verbose=True, show_date=True)
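The hard-coded surface-brightness conversions above can be reproduced with astropy.units; a minimal check, with the 2.50" and ~1.223" native pixel scales inferred from the commented-out derivation in the loop:

import astropy.units as u

def mjy_sr_to_ujy_per_px(native_scale):
    """uJy per native pixel for an image in MJy/sr"""
    return (1*u.MJy/u.sr).to(u.uJy/u.arcsec**2).value*native_scale**2

print(mjy_sr_to_ujy_per_px(2.50))   # ~146.90, cf. the MIPS-24 constant
print(mjy_sr_to_ujy_per_px(1.223))  # ~35.16, cf. the IRAC constant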
Example #10
def auto_run(root='j023507-040202', flag_global_crs=False):

    import os
    import glob
    import numpy as np

    import astropy.io.fits as pyfits
    import astropy.wcs as pywcs

    from drizzlepac import updatehdr
    from stwcs import updatewcs

    from grizli import utils, prep
    from grizli.pipeline import auto_script
    utils.set_warnings()

    visit_file = '{0}_visits.npy'.format(root)
    visits, all_groups, info = np.load(visit_file, allow_pickle=True)

    # Something wrong with some files with bad shifts, reset wcs
    for visit in visits:
        for file in visit['files']:
            utils.fetch_hst_calibs(
                file, calib_types=['IDCTAB', 'NPOLFILE', 'IMPHTTAB'])
            updatewcs.updatewcs(file, verbose=True, use_db=False)

        # Apply shifts
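        # Read {product}_shifts.log, if present, and re-apply the stored
        # x/y pixel shifts to each FLT relative to the WCS of the first
        # exposure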
        shift_log = '{0}_shifts.log'.format(visit['product'])
        if os.path.exists(shift_log):
            sh = utils.read_catalog(shift_log)
            flt0 = pyfits.open(sh['flt'][0])
            wcs_ref = pywcs.WCS(flt0['SCI', 1].header, fobj=flt0, relax=True)
            shift_dict = {}
            for i in range(len(sh)):
                shift_dict[sh['flt'][i]] = [sh['xshift'][i], sh['yshift'][i]]

            prep.apply_tweak_shifts(wcs_ref,
                                    shift_dict,
                                    grism_matches={},
                                    verbose=False)

    # Redrizzle mosaics
    prep.drizzle_overlaps(visits,
                          check_overlaps=False,
                          skysub=False,
                          static=False,
                          pixfrac=0.5,
                          scale=None,
                          final_wcs=False,
                          fetch_flats=False,
                          final_rot=None,
                          include_saturated=True)

    ####### Alignment
    os.system('rm *wcs.*')

    # Radec
    master_radec = '{0}/../../{1}_master.radec'.format(os.getcwd(), root)

    if not os.path.exists(master_radec):
        master_radec = None

    ref_catalog = 'USER'

    if root.startswith('cos-'):
        hsc = '{0}/../../{1}'.format(os.getcwd(),
                                     'hsc-udeep-i25_corr_cosmos.radec')
        if os.path.exists(hsc):
            master_radec = hsc
            ref_catalog = 'HSC'

    elif root.startswith('uds-'):
        hsc = '{0}/../../{1}'.format(os.getcwd(),
                                     'hsc-udeep-sxds_corr_uds.radec')
        if os.path.exists(hsc):
            master_radec = hsc
            ref_catalog = 'HSC'

    parent_radec = '{0}/../../{1}_parent.radec'.format(os.getcwd(), root)
    if not os.path.exists(parent_radec):
        parent_radec = None

    if master_radec is not None:
        radec = master_radec
    elif parent_radec is not None:
        radec = parent_radec
    else:
        radec = None

    if radec is None:
        needs_gaia = True
    else:
        needs_gaia = False

    REFERENCE = 'GAIA'
    REFERENCE = 'PS1'

    print('master RADEC file: ', radec)

    thresh = 2.5
    for visit in visits:

        # Clean catalogs
        files = glob.glob('{0}.*'.format(visit['product']))
        for file in files:
            os.remove(file)

        # Generate GAIA alignment catalog at the observation epoch
        clip = 120
        clip = -1
        if needs_gaia:
            flt = pyfits.open(visit['files'][0])
            h = flt['SCI', 1].header
            ra_i, dec_i = h['CRVAL1'], h['CRVAL2']
            radec, ref_catalog = prep.get_radec_catalog(
                ra=ra_i,
                dec=dec_i,
                product=visit['product'],
                date=flt[0].header['EXPSTART'],
                date_format='mjd',
                reference_catalogs=[REFERENCE],
                radius=5.)
            flt.close()
            if REFERENCE == 'GAIA':
                mag_limits = [16, 20]
            else:
                mag_limits = [18, 22]
                #clip = 50

            if '_flc' in visit['files'][0]:
                triangle_size_limit = [5, 4000 * np.sqrt(2)]
            else:
                triangle_size_limit = [5, 1300]
        else:
            mag_limits = [19, 23]
            triangle_size_limit = [5, 1300]

        # Remake catalogs
        cat = prep.make_SEP_catalog(root=visit['product'], threshold=thresh)

        # Redo alignment
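        # Alignment cascade: try strict matching parameters first, then
        # progressively relax the clipping / outlier thresholds, and
        # finally fall back to a fresh PS1 catalog if everything fails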
        try:
            print('XXX clip', clip, mag_limits, triangle_size_limit)
            result = prep.align_drizzled_image(
                root=visit['product'],
                radec=radec,
                mag_limits=mag_limits,
                simple=False,
                max_err_percentile=80,
                clip=clip,
                outlier_threshold=5,
                rms_limit=2.5,
                triangle_size_limit=triangle_size_limit)
        except:
            print('First align failed!  Relax parameters')
            try:
                result = prep.align_drizzled_image(
                    root=visit['product'],
                    radec=radec,
                    mag_limits=[10, 20],
                    simple=False,
                    max_err_percentile=99,
                    clip=160,
                    outlier_threshold=20,
                    rms_limit=2.5,
                    triangle_size_limit=triangle_size_limit)
            except:
                try:
                    result = prep.align_drizzled_image(
                        root=visit['product'],
                        radec=radec,
                        mag_limits=[10, 20],
                        simple=False,
                        max_err_percentile=99,
                        clip=160,
                        outlier_threshold=40,
                        rms_limit=2.5,
                        triangle_size_limit=triangle_size_limit)
                except:
                    radec = '{0}_ps1.radec'.format(visit['product'])
                    ref_catalog = 'PS1'
                    result = prep.align_drizzled_image(
                        root=visit['product'],
                        radec=radec,
                        mag_limits=mag_limits,
                        simple=False,
                        max_err_percentile=80,
                        clip=120,
                        outlier_threshold=5,
                        rms_limit=2.5,
                        triangle_size_limit=triangle_size_limit)

            #continue

        orig_wcs, drz_wcs, out_shift, out_rot, out_scale = result

        # Propagate shifts
        for file in visit['files']:
            updatehdr.updatewcs_with_shift(file,
                                           str('{0}_wcs.fits'.format(
                                               visit['product'])),
                                           xsh=out_shift[0],
                                           ysh=out_shift[1],
                                           rot=out_rot,
                                           scale=out_scale,
                                           wcsname=ref_catalog,
                                           force=True,
                                           reusename=True,
                                           verbose=True,
                                           sciext='SCI')

            ### Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
            ### keywords
            im = pyfits.open(file, mode='update')
            im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
            im.flush()

    # Redrizzle mosaics again including new shifts
    prep.drizzle_overlaps(visits,
                          check_overlaps=False,
                          skysub=False,
                          static=False,
                          pixfrac=0.8,
                          scale=None,
                          final_wcs=False,
                          fetch_flats=False,
                          final_rot=None)
    # Remake catalogs
    thresh = 2.5
    for visit in visits:
        # Remake catalogs
        cat = prep.make_SEP_catalog(root=visit['product'], threshold=thresh)
        prep.table_to_regions(cat, '{0}.cat.reg'.format(visit['product']))
        prep.table_to_radec(cat, '{0}.cat.radec'.format(visit['product']))

    # Update visits file
    v = auto_script.get_visit_exposure_footprints(visit_file=visit_file,
                                                  check_paths=['./', '../RAW'],
                                                  simplify=1.e-6)

    if flag_global_crs:
        # Assume everything at same orient
        pass

    if False:
        # Mosaic
        auto_script.drizzle_overlaps(root,
                                     filters=['F160W'],
                                     min_nexp=1,
                                     pixfrac=0.8,
                                     scale=0.1,
                                     make_combined=False,
                                     ref_image=None,
                                     static=False)
Example #11
def apply_catalog_corrections(root, total_flux='flux_auto', auto_corr=True, get_external_photometry=False, external_limits=3, external_timeout=300, external_sys_err=0.3, aperture_indices='all', suffix='_apcorr', verbose=True, apply_background=True):
    """
    Aperture and background corrections to photometric catalog
    """
    import os
    import eazy
    import numpy as np
    
    from grizli import utils
    import mastquery.utils
    
    cat = utils.read_catalog('{0}_phot.fits'.format(root))
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())
    
    if get_external_photometry:
        print('Get external photometry from Vizier')
        try:
            ext = get_external_catalog(cat, external_limits=external_limits,
                                       timeout=external_timeout,
                                       sys_err=external_sys_err)
            for c in ext.colnames:
                if c not in cat.colnames:
                    cat[c] = ext[c]
            
            for k in ext.meta:
                cat.meta[k] = ext.meta[k]
        except:
            print(' - External catalog FAILED')
            pass
        
    # Fix: Take flux_auto when flag==0, flux otherwise
    if (total_flux == 'flux_auto_fix') & (total_flux not in cat.colnames):
        flux = cat['flux_auto']*1.
        flagged = (cat['flag'] > 0)
        flux[flagged] = cat['flux'][flagged]
        cat['flux_auto_fix'] = flux*1.
    
    # Additional auto correction

    cat.meta['TOTALCOL'] = total_flux, 'Column for total flux'
    #cat.meta['HASTOT'] = (auto_corr &  ('tot_corr' in cat.colnames), 'Catalog has full total flux')
    
    apcorr = {}
    for NAPER in range(100):
        if 'APER_{0}'.format(NAPER) not in cat.meta:
            break
    
    if aperture_indices == 'all':
        aperture_indices = range(NAPER)
        
    for i in aperture_indices:
        
        if verbose:
            print('Compute aperture corrections: i={0}, D={1:.2f}" aperture'.format(i, cat.meta['ASEC_{0}'.format(i)]))
            
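        # Aperture correction: ratio of the total (e.g., AUTO) flux to
        # the flux in aperture i, applied multiplicatively to the
        # background-subtracted aperture fluxes of every filter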
        if 'flux_aper_{0}'.format(i) in cat.colnames:
            cat['apcorr_{0}'.format(i)] = cat[total_flux]/cat['flux_aper_{0}'.format(i)]
            for f in filters:
                bkgc = '{0}_bkg_aper_{1}'.format(f, i)
                if (bkgc in cat.colnames) & apply_background:
                    bkg = cat[bkgc]
                else:
                    bkg = 0.
                    
                cat['{0}_corr_{1}'.format(f, i)] = (cat['{0}_flux_aper_{1}'.format(f, i)]-bkg)*cat['apcorr_{0}'.format(i)]
                cat['{0}_ecorr_{1}'.format(f, i)] = cat['{0}_fluxerr_aper_{1}'.format(f, i)]*cat['apcorr_{0}'.format(i)]
                
                # mask_thresh = np.percentile(cat['{0}_mask_aper_{1}'.format(f, i)], 95)
                aper_area = np.pi*(cat.meta['APER_{0}'.format(i)]/2)**2
                mask_thresh = aper_area
                
                bad = cat['{0}_mask_aper_{1}'.format(f, i)] > 0.2*mask_thresh
                cat['{0}_corr_{1}'.format(f, i)][bad] = -99
                cat['{0}_ecorr_{1}'.format(f, i)][bad] = -99
                
                tot_col = '{0}_tot_corr'.format(f.lower())
                
                if auto_corr and (tot_col in cat.colnames):   
                    cat['{0}_tot_{1}'.format(f, i)] = cat['{0}_corr_{1}'.format(f, i)]*cat[tot_col]
                    cat['{0}_etot_{1}'.format(f, i)] = cat['{0}_ecorr_{1}'.format(f, i)]*cat[tot_col]

                    cat['{0}_tot_{1}'.format(f, i)][bad] = -99
                    cat['{0}_etot_{1}'.format(f, i)][bad] = -99
                                    
    cat.rename_column('number','id')
    cat['z_spec'] = cat['id']*0.-1
    
    # Spurious sources, sklearn SVM model trained for a single field
    morph_model = os.path.join(os.path.dirname(utils.__file__),
                               'data/sep_catalog_junk.pkl')
                               
    if os.path.exists(morph_model):
        if verbose:
            print('Apply morphological validity class')
        
        import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23
        clf = joblib.load(morph_model)
        X = np.hstack([[cat['peak']/cat['flux'], 
                        cat['cpeak']/cat['peak']]]).T
        
        # Predict labels, which where generated for 
        #    bad_bright, bad_faint, stars, big_galaxies, small_galaxies
        pred = clf.predict_proba(X)
        
        # Should be >~ 0.9 for valid sources, stars & galaxies in "ir" image
        cat['class_valid'] = pred[:,-3:].sum(axis=1) 
        cat['class_valid'].format = '.2f'
    
    cat['dummy_err'] =  10**(-0.4*(8-23.9))
    cat['dummy_flux'] = cat[total_flux] # detection band
    
    if suffix:
        if verbose:
            print('Write {0}_phot{1}.fits'.format(root, suffix))
            
        cat.write('{0}_phot{1}.fits'.format(root, suffix), overwrite=True)
    
    return cat
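As a toy numeric illustration of the correction applied above (hypothetical fluxes, not from any real catalog):

flux_auto, flux_aper_1 = 10.0, 4.0   # total and aperture-1 fluxes
apcorr_1 = flux_auto/flux_aper_1     # = 2.5
f160w_flux_aper_1, f160w_bkg_aper_1 = 2.0, 0.1
f160w_corr_1 = (f160w_flux_aper_1 - f160w_bkg_aper_1)*apcorr_1  # = 4.75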
Example #12
def extract_beams_from_flt(root, bucket, id, clean=True, silent=False):
    """
    Download GrismFLT files and extract the beams file
    """
    import os
    import gc
    import glob

    import numpy as np
    import boto3
    
    import matplotlib.pyplot as plt
    
    import grizli
    from grizli import fitting, utils, multifit
    from grizli.version import __version__ as grizli__version
    
    utils.set_warnings()
    from grizli.pipeline import auto_script
    
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')
    bkt = s3.Bucket(bucket)
    
    # WCS files for ACS
    files = [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/j'.format(root))]
    files += [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/i'.format(root))]

    files += [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/{0}-ir.cat.fits'.format(root))]

    files += [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/fit_args.npy'.format(root))]
    
    download_files = []
    for file in np.unique(files):
        if ('cat.fits' in file) | ('fit_args' in file):
            if os.path.exists(os.path.basename(file)):
                continue
            
            download_files.append(file)
        
    for file in download_files:
        print(file)    
        bkt.download_file(file, os.path.basename(file),
                          ExtraArgs={"RequestPayer": "requester"})
    
    # Read the catalog
    ircat = utils.read_catalog('{0}-ir.cat.fits'.format(root))
    ix = ircat['NUMBER'] == id
    object_rd = (ircat['X_WORLD'][ix], ircat['Y_WORLD'][ix])
    del(ircat)
    
    # One beam at a time
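    # Strategy: fetch one GrismFLT file at a time, test whether the object
    # lands in its footprint using the small *wcs.fits file, extract the
    # beam cutouts, and delete the large files before moving on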
    beams = None
    
    flt_files = []
    for file in files:
        if 'GrismFLT.fits' in file:
            flt_files.append(file)
    
    if not silent:
        print('Read {0} GrismFLT files'.format(len(flt_files)))
    
    if os.path.exists('{0}_fit_args.npy'.format(root)):
        args_file = '{0}_fit_args.npy'.format(root)
    else:
        args_file = 'fit_args.npy'
        
    for i, file in enumerate(flt_files):
        if not silent:
            print('# Read {0}/{1}'.format(i+1, len(flt_files)))

        flt, ext, _, _ = os.path.basename(file).split('.')          
        if flt.startswith('i'):
            fl = 'flt'
        else:
            fl = 'flc'
        
        out_files = ['{0}_{2}.{1}.wcs.fits'.format(flt, ext, fl), 
                     '{0}.{1}.GrismFLT.fits'.format(flt, ext), 
                     '{0}.{1}.GrismFLT.pkl'.format(flt, ext)]
        
        exp_has_id = False
        
        for j, f_j in enumerate(out_files):             
            aws_file = os.path.join(os.path.dirname(file), f_j)
            if not silent:
                print('  ', aws_file)
            
            if not os.path.exists(f_j):
                bkt.download_file(aws_file, f_j, 
                                  ExtraArgs={"RequestPayer": "requester"})
            
            # WCS file, check if object in footprint
            if f_j.endswith('.wcs.fits'):
                #exp_has_id = check_object_in_footprint(id, f_j, ircat)
                exp_has_id = check_object_in_footprint(None, f_j, None, rd=object_rd)
                if not exp_has_id:
                    if clean:
                        os.remove(f_j)
                    break
        
        if not exp_has_id:
            continue
                
        beams_i = auto_script.extract(field_root=root, maglim=[13, 24],
                                      prior=None, MW_EBV=0.00, ids=id,
                                      pline={}, fit_only_beams=True,
                                      run_fit=False, poly_order=7,
                                      master_files=[os.path.basename(file)],
                                      grp=None, bad_pa_threshold=None,
                                      fit_trace_shift=False, size=32,
                                      diff=True, min_sens=0.02,
                                      skip_complete=True, fit_args={},
                                      args_file=args_file,
                                      get_only_beams=True)
        
        # Remove the GrismFLT file    
        for f_j in out_files:
            if ('GrismFLT' in f_j) & clean:
                os.remove(f_j)

        if beams is None:
            beams = beams_i
        else:
            beams.extend(beams_i)
    
    # Garbage collector
    gc.collect()
        
    if not beams:
        print('No beams found for {0} id={1}'.format(root, id))
        return False
    
    # Grism Object
    args = np.load(args_file, allow_pickle=True)[0]
    mb = multifit.MultiBeam(beams, **args)
    mb.write_master_fits()
    
    # 1D spectrum with R=30 fit
    if True:
        bin_steps, step_templ = utils.step_templates(wlim=[5000, 18000.0], 
                                                     R=30, round=10)  

        tfit = mb.template_at_z(z=0, templates=step_templ,
                                fit_background=True, fitter='lstsq', 
                                get_uncertainties=2)
        
        fig1 = mb.oned_figure(figsize=[5,3], tfit=tfit, show_beams=True, 
                              scale_on_stacked=True, ylim_percentile=5)
                              
        outroot='{0}_{1:05d}.R{2:.0f}'.format(root, id, 30)
        hdu = mb.oned_spectrum_to_hdu(outputfile=outroot+'.fits', 
                                              tfit=tfit, wave=bin_steps)                     
        
        fig1.savefig(outroot+'.png')
        del(hdu)
        
        # Drizzled spectrum
        hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=args['fcontam'],
                                             flambda=False, 
                                             kernel='point', size=32, 
                                             zfit=tfit, diff=False)

        hdu[0].header['GRIZLIV'] = (grizli__version, 'Grizli version')
                                             
        fig.savefig('{0}_{1:05d}.stack.png'.format(root, id))

        hdu.writeto('{0}_{1:05d}.stack.fits'.format(root, id), 
                    overwrite=True)
        
        plt.close('all')
        del(hdu)
        
    outfiles = ['{0}_{1:05d}.beams.fits'.format(root, id)]
    outfiles += glob.glob(outroot+'*')
    outfiles += glob.glob('{0}_{1:05d}.stack*'.format(root, id))
    
    return(outfiles)
Example #13
def run_root(root='j002532m1223', min_zoom=2, get_grism=True):
    """
    Prepare images for fitsmap.convert
    """
    import os
    import glob

    import matplotlib.pyplot as plt
    import astropy.io.fits as pyfits
    import astropy.wcs as pywcs

    from grizli.pipeline import auto_script
    from grizli import utils
    import eazy.utils

    from fitsmap import convert

    print('sync')

    os.system(
        f'aws s3 sync s3://grizli-v1/Pipeline/{root}/Prep/ {root}/ --exclude "*" --include "*sci.fits.gz" --include "*phot.fits" --include "*seg.fits.gz"'
    )
    os.system(
        f'aws s3 sync s3://grizli-v1/Pipeline/{root}/IRAC/ {root}/ --exclude "*" --include "*sci.fits*" --include "*model.fits"'
    )
    os.system(
        f'aws s3 sync s3://grizli-v1/Pipeline/{root}/Map/ {root}/ --exclude "*" --include "{root}.*png"'
    )

    os.chdir(root)

    if not os.path.exists(f'{root}.rgb.png'):
        _ = auto_script.field_rgb(root=root,
                                  xsize=6,
                                  full_dimensions=True,
                                  HOME_PATH=None,
                                  gzext='*',
                                  suffix='.rgb',
                                  output_format='png')

    # IR
    files = glob.glob(f'{root}-[if][r01]*sci.fits*')
    files.sort()
    filts = [file.split(f'{root}-')[1].split('_')[0] for file in files]
    for filt in filts:
        if os.path.exists(f'{root}.{filt}.png'):
            continue

        _ = auto_script.field_rgb(root=root,
                                  xsize=6,
                                  full_dimensions=True,
                                  HOME_PATH=None,
                                  gzext='*',
                                  filters=[filt],
                                  suffix=f'.{filt}',
                                  output_format='png',
                                  invert=True,
                                  scl=2)

    # Optical, 2X pix
    files = glob.glob(f'{root}-[f][2-8]*sci.fits*')
    files.sort()
    filts = [file.split(f'{root}-')[1].split('_')[0] for file in files]
    for filt in filts:
        if os.path.exists(f'{root}.{filt}.png'):
            continue

        _ = auto_script.field_rgb(root=root,
                                  xsize=6,
                                  full_dimensions=2,
                                  HOME_PATH=None,
                                  gzext='*',
                                  filters=[filt],
                                  suffix=f'.{filt}',
                                  output_format='png',
                                  invert=True,
                                  scl=2)

    # Spitzer
    if glob.glob(f'{root}-ch*fits*'):
        import reproject
        out_img = pyfits.open(f'{root}-ir_drz_sci.fits.gz')
        repr_hdu = out_img[0]
        # repr_hdu = utils.make_maximal_wcs([out_wcs], pixel_scale=0.2,
        #                                   verbose=False, pad=0, poly_buffer=0)
        repr_wcs = pywcs.WCS(repr_hdu.header)

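        # Reproject each IRAC channel (and model, if present) onto the
        # HST IR mosaic WCS with nearest-neighbor blotting so the fitsmap
        # layers share a common pixel grid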
        mosaics = glob.glob(f'{root}-ch[12]*sci.fits*')
        mosaics.sort()
        for mos in mosaics:
            ch = mos.split(f'{root}-')[1].split('_')[0]
            if os.path.exists(f'{root}.{ch}.png'):
                continue

            print(f'Reproject {ch}')
            in_img = pyfits.open(mos)
            in_wcs = pywcs.WCS(in_img[0].header)

            reproj = utils.blot_nearest_exact(in_img[0].data,
                                              in_wcs,
                                              repr_wcs,
                                              scale_by_pixel_area=False)

            pyfits.writeto(f'{root}-{ch}s_drz_sci.fits',
                           data=reproj,
                           header=repr_hdu.header,
                           overwrite=True)

            ext = [ch + 's']

            if os.path.exists(f'{root}-{ch}_model.fits'):
                # resid
                print(f' {ch} model')
                in_img = pyfits.open(f'{root}-{ch}_model.fits')
                reproj = utils.blot_nearest_exact(in_img[1].data,
                                                  in_wcs,
                                                  repr_wcs,
                                                  scale_by_pixel_area=False)
                pyfits.writeto(f'{root}-{ch}m_drz_sci.fits',
                               data=reproj,
                               header=repr_hdu.header,
                               overwrite=True)
                ext.append(ch + 'm')

            for filt in ext:
                _ = auto_script.field_rgb(root=root,
                                          xsize=6,
                                          full_dimensions=True,
                                          HOME_PATH=None,
                                          gzext='',
                                          filters=[filt],
                                          suffix=f'.{filt}',
                                          output_format='png',
                                          invert=True,
                                          scl=2)

    if not os.path.exists(f'{root}.seg.png'):
        sfig = make_seg(f'{root}-ir_seg.fits.gz', outfile=f'{root}.seg.png')

    filelist = []
    for q in ['*f[2-8]', '*f[01]*', '*ir*', '*ch[12]', '*seg', '*rgb']:
        l_i = glob.glob(q + '*png')
        l_i.sort()
        filelist.extend(l_i)

    ph = utils.read_catalog(f'{root}_phot.fits')
    ph['id'] = ph['number']
    ph['ra'].format = '.6f'
    ph['dec'].format = '.6f'
    ph['mag'] = ph['mag_auto']
    ph['mag'].format = '.2f'

    ph['query'] = [
        eazy.utils.query_html(r, d).split(') ')[1]
        for r, d in zip(ph['ra'], ph['dec'])
    ]

    ph['id', 'ra', 'dec', 'query', 'mag'].write('phot.cat',
                                                format='ascii.csv',
                                                overwrite=True)

    filelist += ['phot.cat']

    if get_grism:
        from grizli.aws import db
        engine = db.get_db_engine()
        gr = db.from_sql(
            f"select root, id, ra, dec, z_map from redshift_fit where root='{root}'",
            engine)

        print(f'grism.cat: {len(gr)} sources')

        if len(gr) > 0:
            gr['query'] = [
                eazy.utils.query_html(r, d).split(') ')[1]
                for r, d in zip(gr['ra'], gr['dec'])
            ]

            gr['stack'] = [
                f'<img src="https://s3.amazonaws.com/grizli-v1/Pipeline/{root}/Extractions/{root}_{id:05d}.stack.png"  height="100px"/>'
                for id in gr['id']
            ]
            gr['full'] = [
                f'<img src="https://s3.amazonaws.com/grizli-v1/Pipeline/{root}/Extractions/{root}_{id:05d}.full.png"  height="100px"/>'
                for id in gr['id']
            ]
            gr['line'] = [
                f'<img src="https://s3.amazonaws.com/grizli-v1/Pipeline/{root}/Extractions/{root}_{id:05d}.line.png" height="80px"/>'
                for id in gr['id']
            ]

            gr['ra'].format = '.6f'
            gr['dec'].format = '.6f'
            gr['z_map'].format = '.4f'

            gr['id', 'ra', 'dec', 'query', 'z_map', 'stack', 'full',
               'line'].write('grism.cat', format='ascii.csv', overwrite=True)

            filelist += ['grism.cat']

    convert.MPL_CMAP = 'gray_r'
    convert.cartographer.MARKER_HTML_WIDTH = '650px'
    convert.cartographer.MARKER_HTML_HEIGHT = '440px'
    convert.POPUP_CSS = [
        "span { text-decoration:underline; font-weight:bold; line-height:12pt; }",
        "tr { line-height: 7pt; }",
        "table { width: 100%; }",
        "img { height: 100px; width: auto; }",
    ]

    convert.dir_to_map("./",
                       filelist=filelist,
                       out_dir="output",
                       cat_wcs_fits_file=f"{root}-ir_drz_sci.fits.gz",
                       catalog_delim=',',
                       min_zoom=min_zoom,
                       task_procs=False,
                       image_engine='MPL')
    plt.close('all')

    if os.path.exists('output/index.html'):
        os.system(
            f'aws s3 sync output/ s3://grizli-v1/Pipeline/{root}/Map/ --acl public-read --quiet'
        )
        os.system(
            f'aws s3 sync ./ s3://grizli-v1/Pipeline/{root}/Map/ --exclude "*" --include "{root}.*png" --acl public-read'
        )
Example #14
def fit_quasars_lambda(root='j100025+021706', newfunc=True):
    import time
    import os
    import glob
    import json

    import numpy as np
    import boto3
    from astropy import table

    from grizli import utils
    from grizli.pipeline import auto_script

    s3 = boto3.resource('s3')
    bkt = s3.Bucket('aws-grivam')
    #bkt.download_file('/Pipeline/wisps-aug10.fits', 'wisps-aug10', ExtraArgs={"RequestPayer": "requester"})

    fit = utils.read_catalog('wisps-aug10.fits')

    fit = table.vstack([
        utils.read_catalog('grizli-3dhst-18.08.05.fits'),
        utils.read_catalog('grizli-18.05.17-full.fits')
    ])

    # All
    fit = table.vstack([
        utils.read_catalog('wisps-aug10.fits'),
        utils.read_catalog('grizli-3dhst-18.08.05.fits'),
        utils.read_catalog('grizli-18.05.17-full.fits')
    ])

    fit = utils.read_catalog('grizli-3dhst-18.08.05.fits')
    fit = utils.read_catalog('grizli-18.05.17-full.fits')

    roots = np.unique(fit['root'])

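    # Point-source selection: "stars" fall below a mag_auto-dependent
    # flux_radius threshold interpolated through hand-set control points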
    pa = np.polyfit([16, 21, 24, 25.5], [4.5, 2.6, 2.3, 2.1], 2)
    py = np.polyval(pa, fit['mag_auto'])
    py = np.interp(fit['mag_auto'], [16, 18, 19, 21, 24, 25.5],
                   [7, 7, 5, 2.6, 2.3, 2.1])
    point_source = (fit['flux_radius'] < py)  #& (fit['mag_auto'] < 23)
    fit['is_point'] = point_source * 2 - 1

    bad = (fit['mag_auto'] > 22) & (fit['flux_radius'] < 1.2)
    fit['too_small'] = bad * 2 - 1

    stars = (fit['is_point'] > 0) & (fit['too_small'] < 0) & (fit['mag_auto'] <
                                                              24.5)
    stars &= fit['ninput'] < 40

    beams = [
        'Pipeline/{0}/Extractions/{0}_{1:05d}.beams.fits'.format(root, id)
        for root, id in zip(fit['root'][stars], fit['id'][stars])
    ]
    ninput = fit['ninput'][stars]

    # Auth to create a Lambda function (credentials are picked up from above .aws/credentials)
    session = boto3.Session()

    # Make sure Lambda is running in the same region as the HST public dataset
    client = session.client('lambda', region_name='us-east-1')

    func = 'GrizliFitQuasar'

    print('Lambda function: {0}'.format(func))

    # Auth to create a Lambda function
    session = boto3.Session()
    client = session.client('lambda', region_name='us-east-1')

    done_files = [
        os.path.basename(obj.key)
        for obj in bkt.objects.filter(Prefix='Pipeline/QuasarFit/')
    ]

    count = 0

    for beam, ninp in zip(beams, ninput):
        # if count > 400:
        #     break

        if os.path.basename(beam).replace('.beams', '.full') in done_files:
            #print('{0} {1} {2}'.format(count, 'Skip', beam))
            continue
        else:
            count += 1
            print('{0} {1} {2} ({3})'.format(count, '    ', beam, ninp))

        event = {
            's3_object_path': beam,
            'verbose': "True",
            "use_psf": str(ninp < 15),
        }

        # Invoke Lambda function
        response = client.invoke(FunctionName=func,
                                 InvocationType='Event',
                                 LogType='Tail',
                                 Payload=json.dumps(event))

    files = glob.glob('*full.fits')
    roots = np.unique([file.split('_')[0] for file in files])
    for root in roots:
        if os.path.exists('{0}.info.fits'.format(root)):
            continue

        try:
            auto_script.summary_catalog(field_root=root,
                                        dzbin=0.01,
                                        use_localhost=False,
                                        filter_bandpasses=None)
        except:
            pass

    #
    #fit = utils.read_catalog('wisps-aug10.fits')

    from astropy import table
    files = glob.glob('*info.fits')
    roots = np.unique([file.split('.info')[0] for file in files])

    tabs = []
    for root in roots:
        print(root)
        file = '{0}.info.fits'.format(root)
        if os.path.exists(file):
            tabs.append(utils.read_catalog(file))

    tab = table.vstack(tabs)

    tab['bic_diff_spl'] = tab['bic_spl'] - tab['bic_temp']
    tab['bic_diff_spl'].format = '.1f'

    tab['chinusp'] = tab['chi2spl'] / tab['dof']
    tab['chinusp'].format = '.1f'

    tab['star14'] = tab['splf03'] / np.maximum(tab['splf04'], tab['sple04'])
    tab['star14'].format = '.1f'

    tab['star14e'] = tab['splf03'] / np.sqrt(tab['sple03']**2 +
                                             tab['sple04']**2)
    tab['star14e'].format = '.2f'

    tab['spl14'] = tab['splf03'] / tab['sple03']
    tab['spl14'].format = '.0f'

    cols = [
        'root', 'idx', 'ra', 'dec', 't_g102', 't_g141', 'mag_auto', 'is_point',
        'z_map', 'chinu', 'bic_diff', 'chinusp', 'bic_diff_spl', 'star14',
        'star14e', 'spl14', 'zwidth1', 'a_image', 'sn_SIII', 'sn_Ha',
        'sn_OIII', 'sn_Hb', 'sn_OII', 'log_mass', 'aws_png_stack', 'png_full',
        'png_line'
    ]

    newcol = {}
    for col in ['png_full', 'png_line']:
        newcol[col] = [item.replace('+', '%2B') for item in tab[col]]
        tab.remove_column(col)
        tab[col] = newcol[col]

    idx, dr = fit.match_to_catalog_sky(tab)
    for c in cols:
        if c not in tab.colnames:
            tab[c] = fit[c][idx]

    clip = tab['bic_diff'] > 0

    outroot = 'point_sources_aug15'

    tab.write(outroot + '.fits', overwrite=True)

    tab[cols].filled(fill_value=-1).write_sortable_html(
        outroot + '.html',
        replace_braces=True,
        localhost=False,
        max_lines=50000,
        table_id=None,
        table_class='display compact',
        css=None,
        filter_columns=[
            'mag_auto', 'z_map', 'z02', 'z97', 'bic_diff', 'chinusp', 'star14',
            'star14e', 'spl14', 'bic_diff_spl', 'chinu', 'a_image',
            'flux_radius', 'zwidth1', 'is_point', 'sn_SIII', 'sn_Ha',
            'sn_OIII', 'sn_Hb', 'sn_OII'
        ],
        use_json=True)

    sleep_time = 303 * np.ceil(len(beams) / 950)
    print('{0}: sleep {1}'.format(time.ctime(), sleep_time))

    time.sleep(sleep_time)

    # Status again to check products
    beams, files = get_needed_paths(root)
Example #15
def eazy_photoz(root, force=False, object_only=True, apply_background=True, aper_ix=1, apply_prior=False, beta_prior=True, get_external_photometry=False, external_limits=3, external_sys_err=0.3, external_timeout=300, sys_err=0.05, z_step=0.01, z_min=0.01, z_max=12, total_flux='flux_auto', auto_corr=True, compute_residuals=False, dummy_prior=False, extra_rf_filters=[]):
    
    import os
    import eazy
    import numpy as np
    
    from grizli import utils
    import mastquery.utils
    
    if (os.path.exists('{0}.eazypy.self.npy'.format(root))) & (not force):
        self = np.load('{0}.eazypy.self.npy'.format(root), allow_pickle=True)[0]
        zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))
        cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
        return self, cat, zout
        
    trans = {'f098m':201, 'f105w':202, 'f110w':241, 'f125w':203, 'f140w':204, 'f160w':205, 'f435w':233, 'f438w':211, 'f606w':236, 'f625w':237, 'f814w':239, 'f702w':15, 'f555w':235, 'f350lp':339, 'f475w':212, 'f775w':238, 'f850lp':240}
    #trans.pop('f814w')
    
    print('Apply catalog corrections')
    apply_catalog_corrections(root, suffix='_apcorr')
    
    cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())
        
    # Translate
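    # zphot.translate maps catalog columns to filter IDs in FILTER.RES:
    # 'F{n}' tags a flux column and 'E{n}' its uncertainty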
    fp = open('zphot.translate','w')
    for f in filters:
        if f in trans:
            fp.write('{0}_tot_{1} F{2}\n'.format(f, aper_ix, trans[f]))
            fp.write('{0}_etot_{1} E{2}\n'.format(f, aper_ix, trans[f]))
    
    fp.write('irac_ch1_flux F18\n')
    fp.write('irac_ch1_err  E18\n')
    
    # For zeropoint
    if dummy_prior:
        fp.write('dummy_flux F205x\n')
        fp.write('dummy_err  E205x\n')
            
    fp.close()
    
    params = {}
    params['CATALOG_FILE'] = '{0}_phot_apcorr.fits'.format(root)
    params['Z_STEP'] = z_step
    params['MAIN_OUTPUT_FILE'] = '{0}.eazypy'.format(root)
    
    params['Z_MAX'] = z_max
    params['MW_EBV'] = mastquery.utils.get_irsa_dust(cat['ra'].mean(), cat['dec'].mean())
    params['PRIOR_ABZP'] = 23.9
    
    params['SYS_ERR'] = sys_err
    params['CAT_HAS_EXTCORR'] = False
    
    # Pick prior filter, starting from reddest
    for f in ['f435w', 'f606w', 'f814w', 'f105w', 'f110w', 'f125w', 'f140w', 'f160w'][::-1]:
        if f in filters:
            if dummy_prior:
                params['PRIOR_FILTER'] = 'dummy_flux'
            else:
                params['PRIOR_FILTER'] = trans[f]
            
            mag = 23.9-2.5*np.log10(cat['{0}_corr_{1}'.format(f, aper_ix)])
            break
    #
    if os.path.exists('templates/fsps_full/tweak_fsps_QSF_11_v3_noRed.param.fits'):
        params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_11_v3_noRed.param'
    else:
        params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'

    zpfile = None
    load_products = False

    if (not os.path.exists('FILTER.RES.latest') or 
        not os.path.exists('templates')):
        try:
            # should work with eazy-py >= 0.2.0
            eazy.symlink_eazy_inputs(path=None)
        except:
            print("""
The filter file `FILTER.RES.latest` and `templates` directory were not
found in the working directory and the automatic command to retrieve them 
failed: 
    
    >>> import eazy; eazy.symlink_eazy_inputs(path=None)  

Run it with `path` pointing to the location of the `eazy-photoz` repository.""")
            return False
            
    self = eazy.photoz.PhotoZ(param_file=None, translate_file='zphot.translate', zeropoint_file=zpfile, params=params, load_prior=True, load_products=load_products)
    
    if object_only:
        return self
        
    idx = np.arange(self.NOBJ)
    
    #sample = (mag < 27) #& (self.cat['star_flag'] != 1)
    #sample |= (self.cat['z_spec'] > 0)
    sample = np.isfinite(self.cat['id']) #mag)
    
    for _iter in range(1 + int(get_external_photometry and compute_residuals)):
        self.fit_parallel(idx[sample], n_proc=10)
        if compute_residuals:
            self.error_residuals()
    
    self.standard_output(prior=apply_prior, beta_prior=beta_prior, extra_rf_filters=extra_rf_filters)
    
    zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))
    
    np.save('{0}.eazypy.self.npy'.format(root), [self])
    
    return self, cat, zout
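A minimal usage sketch, assuming {root}_phot.fits and the eazy-photoz filter/template files are in the working directory (the root name is just illustrative):

self, cat, zout = eazy_photoz('j023507-040202', object_only=False,
                              apply_prior=False, beta_prior=True)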
Example #16
def summary_catalog(field_root='', dzbin=0.01, use_localhost=True, filter_bandpasses=None, files=None, cdf_sigmas=None, strip_empty_columns=False, **kwargs):
    """
    Make redshift histogram and summary catalog / HTML table
    """
    import os
    import glob
    import time

    import numpy as np
    from matplotlib.ticker import FixedLocator
    import matplotlib.pyplot as plt

    import astropy.table

    try:
        from .. import fitting, prep, utils
        from . import auto_script
    except:
        from grizli import prep, utils, fitting
        from grizli.pipeline import auto_script

    if filter_bandpasses is None:
        import pysynphot as S
        filter_bandpasses = [S.ObsBandpass(bpstr) for bpstr in ['acs,wfc1,f814w', 'wfc3,ir,f105w', 'wfc3,ir,f110w', 'wfc3,ir,f125w', 'wfc3,ir,f140w', 'wfc3,ir,f160w']]

    if os.path.exists('{0}.info.fits'.format(field_root)):
        orig = utils.read_catalog('{0}.info.fits'.format(field_root))
        all_files = glob.glob('{0}*full.fits'.format(field_root))
        all_files.sort()

        print('{0}.info.fits: {1} objects.  Found {2} full.fits files, checking modify dates.'.format(field_root, len(orig), len(all_files)))
        info_mtime = os.stat('{0}.info.fits'.format(field_root)).st_mtime
        keep = np.ones(len(orig), dtype=bool)

        files = []
        for file in all_files:
            id = int(file.split('_')[1].split('.full')[0])
            if id not in orig['id']:
                files.append(file)
            else:
                full_mtime = os.stat(file).st_mtime
                if full_mtime > info_mtime:
                    files.append(file)
                    keep[orig['id'] == id] = False

        orig = orig[keep]

        if len(files) == 0:
            print('Found {0}.info.fits and no new objects.\n'.format(field_root))
            return False
        else:
            print('Found {0}.info.fits.  Adding {1} new objects.\n'.format(field_root, len(files)))

    else:
        orig = None

    # Summary catalog
    fit = fitting.make_summary_catalog(target=field_root, sextractor=None,
                                       filter_bandpasses=filter_bandpasses,
                                       files=files, cdf_sigmas=cdf_sigmas,
                                       write_table=(dzbin is not None))
    fit.meta['root'] = field_root

    if orig is not None:
        if len(fit) > 0:
            fit = astropy.table.vstack([orig, fit])
            if dzbin is not None:
                fit.write('{0}.info.fits'.format(field_root), overwrite=True)

    mtime = []
    for i in range(len(fit)):
        full_file = '{0}_{1:05d}.full.fits'.format(fit['root'][i], fit['id'][i])
        if os.path.exists(full_file):
            mtime.append(time.ctime(os.stat(full_file).st_mtime))
        else:
            mtime.append('-')

    fit['mtime'] = mtime

    # Add photometric catalog
    try:
        catalog = glob.glob('{0}-*.cat.fits'.format(field_root))[0]
        sex = utils.GTable.gread(catalog)
        # try:
        # except:
        #     sex = utils.GTable.gread('../Prep/{0}-ir.cat.fits'.format(field_root), sextractor=True)

        idx = np.arange(len(sex))
        sex_idx = np.array([idx[sex['NUMBER'] == id][0] for id in fit['id']])

        fit['ellipticity'] = (sex['B_IMAGE']/sex['A_IMAGE'])[sex_idx]

        for col in ['MAG_AUTO', 'FLUX_RADIUS', 'A_IMAGE']:
            fit[col.lower()] = sex[col][sex_idx]
    except:
        pass

    fit = set_column_formats(fit)

    if strip_empty_columns:
        # Remove float columns with only NaN values
        #print('Strip empty columns')
        empty_cols = []
        for c in fit.colnames:
            try:
                isfin = np.isfinite(fit[c])
                if isfin.sum() == 0:
                    empty_cols.append(c)
            except:
                pass

        for c in empty_cols:
            fit.remove_column(c)

    # Just return the table if dzbin parameter not specified
    if dzbin is None:
        return fit

    # Overwrite with additional sextractor keywords
    fit.write('{0}.info.fits'.format(field_root), overwrite=True)

    clip = (fit['chinu'] < 2.0) & (fit['log_risk'] < -1)
    clip = (fit['chinu'] < 2.0) & (fit['zq'] < -3) & (fit['zwidth1']/(1+fit['z_map']) < 0.005)
    clip &= fit['bic_diff'] > 30  # -40

    bins = utils.log_zgrid(zr=[0.1, 3.5], dz=dzbin)

    fig = plt.figure(figsize=[6, 4])
    ax = fig.add_subplot(111)

    ax.hist(np.log10(1+fit['z_map']), bins=np.log10(1+bins), alpha=0.2, color='k')
    ax.hist(np.log10(1+fit['z_map'][clip]), bins=np.log10(1+bins), alpha=0.8)

    xt = np.array(np.arange(0.25, 3.55, 0.25))
    ax.xaxis.set_minor_locator(FixedLocator(np.log10(1+xt)))
    xt = np.array([1, 2, 3])
    ax.set_xticks(np.log10(1+xt))
    ax.set_xticklabels(xt)

    ax.set_xlabel('z')
    ax.set_ylabel(r'$N$')

    ax.grid()
    ax.text(0.05, 0.95, field_root, ha='left', va='top', transform=ax.transAxes)

    fig.tight_layout(pad=0.2)
    fig.savefig('{0}_zhist.png'.format(field_root))

    cols = ['root', 'mtime', 'idx', 'ra', 'dec', 'mag_auto', 't_g800l', 't_g102', 't_g141', 'z_map', 'chinu', 'bic_diff', 'zwidth1', 'd4000', 'd4000_e', 'png_stack', 'png_full', 'png_rgb', 'png_line']

    for i in range(len(cols))[::-1]:
        if cols[i] not in fit.colnames:
            cols.pop(i)

    filter_columns = ['ra', 'dec', 'mag_auto', 't_g800l', 't_g102', 't_g141', 'z_map', 'chinu', 'bic_diff', 'zwidth1', 'd4000', 'd4000_e']

    fit[cols].write_sortable_html(field_root+'-fit.html', replace_braces=True, localhost=use_localhost, max_lines=50000, table_id=None, table_class='display compact', css=None, filter_columns=filter_columns, use_json=(not use_localhost))

    fit[cols][clip].write_sortable_html(field_root+'-fit.zq.html', replace_braces=True, localhost=use_localhost, max_lines=50000, table_id=None, table_class='display compact', css=None, filter_columns=filter_columns, use_json=(not use_localhost))

    zstr = ['{0:.3f}'.format(z) for z in fit['z_map'][clip]]
    prep.table_to_regions(fit[clip], output=field_root+'-fit.zq.reg', comment=zstr)

    if False:

        fit = utils.GTable.gread('{0}.info.fits'.format(field_root))
        fit = auto_script.set_column_formats(fit)

        cols = ['id', 'ra', 'dec', 'mag_auto', 't_g102', 't_g141', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'z_map', 'log_risk', 'log_pdf_max', 'zq', 'chinu', 'bic_diff', 'zwidth1', 'png_stack', 'png_full', 'png_line']

        #clip = ((fit['sn_Ha'] > 5) | (fit['sn_OIII'] > 5)) & (fit['bic_diff'] > 50) & (fit['chinu'] < 2)
        #clip = (fit['sn_OIII'] > 5) & (fit['bic_diff'] > 100) & (fit['chinu'] < 3)

        test_line = {}
        for li in ['Ha', 'OIII', 'OII']:
            test_line[li] = (fit['sn_'+li] > 5) & (fit['err_'+li] < 1.e-16)

        clip = (test_line['Ha'] | test_line['OIII'] | test_line['OII']) & (fit['bic_diff'] > 50) & (fit['chinu'] < 2)

        star = fit['flux_radius'] < 2.3
        clip &= ~star

        jh = fit['mag_wfc3,ir,f125w'] - fit['mag_wfc3,ir,f160w']
        clip = (fit['chinu'] < 2) & (jh > 0.9) & (fit['mag_wfc3,ir,f160w'] < 23)
        fit['jh'] = jh
        fit['jh'].format = '.1f'

        fit['dmag'] = fit['mag_wfc3,ir,f140w'] - fit['mag_auto']
        fit['dmag'].format = '.1f'

        cols = ['idx', 'ra', 'dec', 'mag_auto', 'jh', 'dmag', 't_g141', 'sn_Ha', 'sn_OIII', 'sn_Hb', 'z_map', 'log_risk', 'log_pdf_max', 'zq', 'chinu', 'bic_diff', 'zwidth1', 'png_stack', 'png_full', 'png_line']

        fit[cols][clip].write_sortable_html(field_root+'-fit.lines.html', replace_braces=True, localhost=False, max_lines=50000, table_id=None, table_class='display compact', css=None)
Example #17
def full_hawki_query(rd=None, query_result=None, eso=None):
    """
    Query all HAWKI observations....
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt

    from shapely.geometry import Polygon, Point
    from descartes import PolygonPatch
    from shapely import affinity

    from grizli import utils
    from mastquery import query, overlaps

    if eso is None:
        eso = get_eso()

    if query_result is None:
        _, kwargs, res = full_query(eso=eso)
    else:
        kwargs, res = query_result

    # surveys = 092.A-0472

    # CHArGE fields
    from grizli.aws import db
    import astropy.units as u
    from astropy.coordinates import SkyCoord

    engine = db.get_db_engine()
    if rd is None:
        ch = db.from_sql(
            "SELECT field_root, field_ra as ra, field_dec as dec, log FROM charge_fields where log LIKE '%%Finish%%'",
            engine)
    else:
        ra, dec = rd
        ch = utils.GTable()
        ch['ra'] = [ra]
        ch['dec'] = [dec]

        ch['field_root'] = [
            utils.radec_to_targname(
                ra=ra,
                dec=dec,
                round_arcsec=(4, 60),
                precision=2,
                targstr='j{rah}{ram}{ras}{sign}{ded}{dem}',
                header=None,
            )
        ]

    idx, dr = ch.match_to_catalog_sky(res)

    has_hawki = dr < 10 * u.arcmin

    import scipy.spatial
    ch_rd = SkyCoord(ch['ra'], ch['dec'], unit='deg')
    ch_xyz = ch_rd.cartesian.get_xyz().value
    ctree = scipy.spatial.cKDTree(ch_xyz.T)

    hawki_rd = SkyCoord(res['RA'], res['DEC'], unit='deg')
    hawki_xyz = hawki_rd.cartesian.get_xyz().value
    htree = scipy.spatial.cKDTree(hawki_xyz.T)

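    # Cross-match on the unit sphere: RA/Dec converted to 3D unit vectors
    # with KD-trees for fast neighbor searches; r is the chord-length
    # search radius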
    r = 30. / 60 / 360. * 2

    tr = ctree.query_ball_tree(htree, r)
    n_hawki = np.array([len(t) for t in tr])

    # Figures
    idx = np.where(n_hawki > 0)[0]

    xsize = 5
    px, py = 0.45, 0.2

    for i in idx:
        field = ch['field_root'][i]
        print(i, field)
        if os.path.exists(f'{field}_hawki.png'):
            continue

        field = ch['field_root'][i]

        #tab = utils.read_catalog(f'../FieldsSummary/{field}_footprint.fits')
        if os.path.exists(f'{field}_footprint.fits'):
            tab = utils.read_catalog(f'{field}_footprint.fits')
            meta = tab.meta

            xr = (meta['XMIN'], meta['XMAX'])
            yr = (meta['YMIN'], meta['YMAX'])
            ra, dec = meta['BOXRA'], meta['BOXDEC']

            cosd = np.cos(dec / 180 * np.pi)
            dx = (xr[1] - xr[0]) * cosd * 60
            dy = (yr[1] - yr[0]) * 60

            box_width = np.maximum(dx, dy)
            #query_size = np.maximum(min_size, box_width/2)/60.

            p_hst = None
            p_ir = None

            for j, fph in enumerate(tab['footprint']):
                ps, is_bad, poly = query.instrument_polygon(tab[j])
                if not hasattr(ps, '__len__'):
                    ps = [ps]

                for p in ps:
                    p_j = Polygon(p).buffer(0.001)
                    if p_hst is None:
                        p_hst = p_j
                    else:
                        p_hst = p_hst.union(p_j)

                    if tab['instrument_name'][j] == 'WFC3/IR':
                        if p_ir is None:
                            p_ir = p_j
                        else:
                            p_ir = p_ir.union(p_j)
        else:
            # No footprint file; fall back to the catalog coordinates
            ra, dec = ch['ra'][i], ch['dec'][i]
            cosd = np.cos(dec / 180 * np.pi)
            p_hst = None
            p_ir = None

        ##############################
        fig = plt.figure(figsize=[6, 6])

        ax = fig.add_subplot(111)
        ax.scatter(ra, dec, zorder=1000, marker='+', color='k')

        # HAWKI
        h_p = None
        for j in tr[i]:
            p = Point(res['RA'][j], res['DEC'][j]).buffer(4.1 / 60)
            p = affinity.scale(p, xfact=1. / cosd)

            # ax.add_patch(PolygonPatch(p, color='r', alpha=0.1))
            x, y = p.boundary.xy
            ax.plot(x, y, color=utils.MPL_COLORS['r'], alpha=0.05)

            if h_p is None:
                h_p = p
            else:
                h_p = h_p.union(p)

        # If HAWK-I overlaps HST, compute the intersection (queried below)
        if p_hst is not None:
            hawki_overlap = h_p.intersection(p_hst)
            hawki_un = h_p.union(p_hst)

            if not hasattr(p_hst, '__len__'):
                p_hst = [p_hst]

            if not hasattr(h_p, '__len__'):
                h_p = [h_p]

            for p in p_hst:
                #ax.add_patch(PolygonPatch(p, color='k', alpha=0.2))
                if not hasattr(p.boundary, '__len__'):
                    bs = [p.boundary]
                else:
                    bs = p.boundary

                for b in bs:
                    x, y = b.xy
                    ax.plot(x, y, color=utils.MPL_COLORS['gray'], alpha=0.3)
        else:
            hawki_overlap = h_p
            if not hasattr(h_p, '__len__'):
                h_p = [h_p]

        if p_ir is not None:
            if not hasattr(p_ir, '__len__'):
                p_ir = [p_ir]

            for p in p_ir:
                ax.add_patch(
                    PolygonPatch(p, color=utils.MPL_COLORS['gray'], alpha=0.2))
                x, y = p.boundary.xy
                ax.plot(x, y, color=utils.MPL_COLORS['gray'], alpha=0.3)

        for p in h_p:
            ax.add_patch(
                PolygonPatch(p, color=utils.MPL_COLORS['r'], alpha=0.2))

        targets = [
            '{0}  {1}'.format(res['ProgId'][j], res['Object'][j])
            for j in tr[i]
        ]
        # Exposure time per dataset [s] = NDIT * DIT * NEXP
        expt = res['DET NDIT'] * res['DET DIT'] * res['TPL NEXP']

        for j, targ in enumerate(np.unique(targets)):
            ixj = np.where(np.array(targets) == targ)[0]

            ax.text(0.02,
                    0.98 - j * 0.03,
                    '{0} {1:.1f}'.format(targ, expt[tr[i]][ixj].sum() / 3600.),
                    ha='left',
                    va='top',
                    transform=ax.transAxes,
                    fontsize=7)

        ax.set_aspect(1. / cosd)
        ax.set_title(field)
        ax.grid()

        #xsize = 4

        dx = np.diff(ax.get_xlim())[0] * cosd * 60
        dy = np.diff(ax.get_ylim())[0] * 60

        fig.set_size_inches(xsize * np.clip(dx / dy, 0.2, 5) + px, xsize + py)
        ax.set_xlim(ax.get_xlim()[::-1])
        overlaps.draw_axis_labels(ax=ax, nlabel=3)

        fig.tight_layout(pad=0.5)
        fig.savefig(f'{field}_hawki.png', dpi=120)
        plt.close('all')

        if (hawki_overlap.area > 0) and (not os.path.exists(f'{field}_hawki.fits')):

            kws = {}
            for k in kwargs:
                kws[k] = kwargs[k].copy()

            # These exposure-level filters don't apply to the broader query
            kws['column_filters'].pop('tpl_nexp', None)
            kws['column_filters'].pop('tpl_expno', None)

            _res = eso.query_instrument('hawki',
                                        pi_coi_name='PI_only',
                                        coord1=ra,
                                        coord2=dec,
                                        box='00 30 00',
                                        **kws)

            if len(_res) > 0:
                print('{0} datasets'.format(len(_res)))
                _res['PI'] = [p.split('/')[0].strip() for p in _res['PI/CoI']]
                _res.write(f'{field}_hawki.fits', overwrite=True)
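
# Usage sketch (illustrative, not from the original source): query HAWK-I
# coverage around a single position.  Assumes ESO credentials are configured
# for `get_eso()` and that `full_query` supplies the column filters.
# full_hawki_query(rd=(53.16, -27.79))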
Example #18
def eazy_photoz(root, force=False):
    """
    Run eazy-py photometric redshifts on the '{root}_phot.fits' catalog,
    returning cached results unless force=True.
    """
    import os
    import eazy
    import numpy as np
    from grizli import utils

    if (os.path.exists('{0}.eazypy.self.npy'.format(root))) & (not force):
        self = np.load('{0}.eazypy.self.npy'.format(root), allow_pickle=True)[0]
        zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))
        cat = utils.read_catalog('{0}_phot_apcorr.fits'.format(root))
        return self, cat, zout

    trans = {
        'f105w': 202,
        'f110w': 241,
        'f125w': 203,
        'f140w': 204,
        'f160w': 205,
        'f435w': 233,
        'f606w': 236,
        'f814w': 239
    }

    cat = utils.read_catalog('{0}_phot.fits'.format(root))
    filters = []
    for c in cat.meta:
        if c.endswith('_ZP'):
            filters.append(c.split('_ZP')[0].lower())

    # Aperture corrections to total flux: scale each aperture flux by
    # flux_auto / flux_aper in the detection image
    apcorr = {}
    for i in range(5):
        if 'flux_aper_{0}'.format(i) in cat.colnames:
            cat['apcorr_{0}'.format(
                i)] = cat['flux_auto'] / cat['flux_aper_{0}'.format(i)]
            for f in filters:
                cat['{0}_corr_{1}'.format(
                    f, i)] = cat['{0}_flux_aper_{1}'.format(
                        f, i)] * cat['apcorr_{0}'.format(i)]
                cat['{0}_ecorr_{1}'.format(
                    f, i)] = cat['{0}_fluxerr_aper_{1}'.format(
                        f, i)] * cat['apcorr_{0}'.format(i)]

                bad = cat['{0}_mask_aper_{1}'.format(
                    f, i)] > 0.2 * np.percentile(
                        cat['{0}_mask_aper_{1}'.format(f, i)], 95)
                cat['{0}_corr_{1}'.format(f, i)][bad] = -99
                cat['{0}_ecorr_{1}'.format(f, i)][bad] = -99

    cat.rename_column('number', 'id')
    cat['z_spec'] = cat['id'] * 0. - 1  # no spec-zs available; flag with -1
    cat.write('{0}_phot_apcorr.fits'.format(root), overwrite=True)

    # Write the eazy translate file; ix=0 selects the aperture index used
    # for the corrected fluxes above
    ix = 0
    fp = open('zphot.translate', 'w')
    for f in filters:
        if f in trans:
            fp.write('{0}_corr_{1} F{2}\n'.format(f, ix, trans[f]))
            fp.write('{0}_ecorr_{1} E{2}\n'.format(f, ix, trans[f]))
    fp.close()
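
    # The resulting zphot.translate maps catalog columns to eazy flux (F)
    # and error (E) columns, e.g. (illustrative):
    #   f814w_corr_0  F239
    #   f814w_ecorr_0 E239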

    params = {}
    params['CATALOG_FILE'] = '{0}_phot_apcorr.fits'.format(root)
    params['Z_STEP'] = 0.01
    params['MAIN_OUTPUT_FILE'] = '{0}.eazypy'.format(root)

    params['Z_MAX'] = 12
    params['MW_EBV'] = 0.
    params['PRIOR_ABZP'] = 23.9

    # Pick prior filter, starting from reddest
    for f in [
            'f435w', 'f606w', 'f814w', 'f105w', 'f110w', 'f125w', 'f140w',
            'f160w'
    ][::-1]:
        if f in filters:
            params['PRIOR_FILTER'] = trans[f]
            mag = 23.9 - 2.5 * np.log10(cat['{0}_corr_{1}'.format(f, ix)])
            break
    #
    params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'

    zpfile = None
    load_products = False

    eazy.symlink_eazy_inputs(
        path='/usr/local/share/python/eazy-py/eazy-photoz', path_is_env=False)

    self = eazy.photoz.PhotoZ(param_file=None,
                              translate_file='zphot.translate',
                              zeropoint_file=zpfile,
                              params=params,
                              load_prior=True,
                              load_products=load_products)

    idx = np.arange(self.NOBJ)

    #sample = (mag < 27) #& (self.cat['star_flag'] != 1)
    #sample |= (self.cat['z_spec'] > 0)
    sample = np.isfinite(mag)

    # Two passes: fit, then recalibrate the uncertainties and refit
    for _iter in range(2):
        self.fit_parallel(idx[sample], n_proc=10)
        self.error_residuals()

    self.standard_output()
    zout = utils.read_catalog('{0}.eazypy.zout.fits'.format(root))

    np.save('{0}.eazypy.self.npy'.format(root), [self])

    return self, cat, zout
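
# Usage sketch (illustrative): fit photo-zs for a processed field.  Output
# column names (e.g. 'z_phot') depend on the eazy-py version.
# self, cat, zout = eazy_photoz('j023507-040202')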
Example #19
def go():
    """
    Scratch workflow: sync preprocessed visits, remake catalogs, run fine
    alignment, and build the combined mosaics.
    """
    import os
    import glob

    import numpy as np
    import astropy.units as u

    from grizli import prep, utils
    from grizli.pipeline import auto_script

    os.chdir('/home/ec2-user/Mosaics')

    # Pick one field; the last uncommented assignment wins
    root = 'j123656p6215'

    root = 'j021732m0512'

    root = 'j141956p5255'

    root = 'j033236m2748'

    root = 'j100012p0210'

    # Sync needed files for GOODSN
    if root == 'j123656p6215':
        # os.system('aws s3 sync --exclude "*" --include "{0}*/Prep/*expflag*" --include "{0}*/Prep/*fail*" --include "{0}*/Prep/*cat.fits" --include "{0}*/Prep/{0}_*dr*fits.gz" --include "{0}*/Prep/*visits.npy" --include "{0}*/Prep/*[._]wcs*" --include "{0}*/Prep/*shifts.*" s3://grizli/Pipeline/ .'.format(root))
        # os.system('aws s3 sync --exclude "*" --include "{0}*/Prep/*wcs.log" --include "{0}*/Prep/*shifts.*" s3://grizli/Pipeline/ .'.format(root))

        os.system(
            'aws s3 sync --exclude "*" --include "{0}*/Prep/*expflag*" --include "{0}*/Prep/*fail*" --include "{0}*/Prep/*cat.fits" --include "{0}*/Prep/*visits.npy" --include "{0}*/Prep/*[._]wcs*" --include "{0}*/Prep/*shifts.*" s3://grizli/Pipeline/ .'
            .format(root))

    else:
        #os.system('aws s3 sync --exclude "*" --include "{0}*/Prep/*expflag*" --include "{0}*/Prep/*fail*" --include "{0}*/Prep/*cat.fits" --include "{0}*/Prep/*visits.npy" --include "{0}*/Prep/*[._]wcs*" --include "{0}*/Prep/*_fl?.*" --include "{0}*/Prep/*shifts.*" s3://grizli-v1/Pipeline/ .'.format(root))
        os.system(
            'aws s3 sync --exclude "*" --include "{0}*/Prep/*expflag*" --include "{0}*/Prep/*fail*" --include "{0}*/Prep/*cat.fits" --include "{0}*/Prep/*visits.npy" --include "{0}*/Prep/*[._]wcs*" --include "{0}*/Prep/*shifts.*" s3://grizli-v1/Pipeline/ .'
            .format(root))

    #os.system('files=`find . |grep "dr[cz]_" |grep fits.gz |grep -v "\-ir_dr"`; for file in $files; do echo $file; gunzip -f $file; done')

    # Remake catalogs
    mos_files = glob.glob('{0}*/Prep/*_dr*sci.fits'.format(root))
    mos_files.sort()

    thresh = 10
    force = False

    for i, file in enumerate(mos_files):
        # Use root_i here to avoid clobbering the field `root` used below
        root_i = file.split('_drc_sci')[0].split('_drz_sci')[0]
        print(i, root_i)

        cat_file = root_i + '.cat.fits'
        if (not os.path.exists(cat_file)) | (force):
            prep.make_SEP_catalog(root=root_i,
                                  threshold=thresh,
                                  get_background=True,
                                  phot_apertures=[1 * u.arcsec])

    os.chdir('Fine/')
    os.system('rsync -avz  ../{0}*/Prep/*cat.fits .'.format(root))
    os.system('ln -s ../{0}*/Prep/*_dr?_sci.fits .'.format(root))
    os.system('rsync -avz  ../{0}*/Prep/*visits.npy .'.format(root))
    os.system('rsync -avz  ../{0}*/Prep/*fail* .'.format(root))

    visit_files = glob.glob('*visits.npy')

    #visit_files = glob.glob('*-tile-*visits.npy')

    visit_files.sort()

    filt = 'f850lp'

    all_visits = []
    for f in visit_files:
        root_i = f.split('_visits.npy')[0]
        cat_file = root_i + '-' + filt + '.cat.fits'
        if not os.path.exists(cat_file):
            continue

        visits, _, _ = np.load(f, allow_pickle=True)
        has_failed = False
        for v in visits:
            has_failed |= os.path.exists(v['product'] + '.failed')

        if has_failed:
            continue

        all_visits.append({'product': root_i + '-' + filt})

    # Fine alignment
    radec, ref_catalog = prep.get_radec_catalog(ra=189.2316435,
                                                dec=62.249739865958,
                                                product='xxx',
                                                reference_catalogs=['PS1'],
                                                radius=30)

    auto_script.fine_alignment(field_root='xxx',
                               all_visits=all_visits,
                               gaia_by_date=False,
                               catalogs=['PS1'],
                               redrizzle=False,
                               shift_only=False,
                               radec=radec,
                               tol=1.e-3)
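
    # fine_alignment writes the solution to '{field_root}_fine.npy'
    # (visits + the scipy fit object), which is unpacked below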

    ##### Apply the fine-alignment solution
    import numpy as np
    import os
    import glob
    from grizli import prep, utils
    from drizzlepac import updatehdr
    import astropy.io.fits as pyfits
    import astropy.wcs as pywcs

    fine_visits, fine_fit = np.load('xxx_fine.npy', allow_pickle=True)

    N = len(fine_visits)

    trans = np.reshape(fine_fit.x, (N, -1))
    sh = trans.shape
    if sh[1] == 2:
        pscl = np.array([10., 10.])
        trans = np.hstack([trans / pscl, np.zeros((N, 1)), np.ones((N, 1))])
    elif sh[1] == 3:
        pscl = np.array([10., 10., 100])
        trans = np.hstack([trans / pscl, np.ones((N, 1))])
    elif sh[1] == 4:
        pscl = np.array([10., 10., 100, 100])
        trans = trans / pscl
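
    # Each row of `trans` is now [x_shift, y_shift, rotation, scale]: the
    # fit stores shifts scaled by 10 and rot/scale by 100, and shorter
    # parameter vectors are padded with zero rotation / unit scale.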

    # Update the direct-image exposure WCS with the fitted shifts
    for ix, direct in enumerate(fine_visits):
        root_i = direct['product'][:-7]
        print('\n\n\n\n##### ', ix, root_i, '#######\n\n\n')

        # Sync products
        os.system(
            'aws s3 sync s3://grizli/Pipeline/{0}/Prep/ ./{0}/Prep/ --exclude "*" --include "{0}*drc_sci.fits.gz" --include "*_flc.*fits"'
            .format(root_i))

        #os.system('aws s3 ls s3://grizli/Pipeline/{0}/Prep/'.format(root_i))

        # Just visits and fail
        os.system(
            'aws s3 sync s3://grizli/Pipeline/{0}/Prep/ ./{0}/Prep/ --exclude "*" --include "*visits.npy" --include "*fail*"'
            .format(root_i))

        #direct = visits[ix]
        out_shift, out_rot = trans[ix, :2], trans[ix, 2]
        out_scale = trans[ix, 3]

        xyscale = trans[ix, :4]
        wcs_ref_file = '{0}/Prep/{0}-f850lp_drc_sci.fits.gz'.format(root_i)
        wcs_ref = pywcs.WCS(pyfits.open(wcs_ref_file)[0].header, relax=True)

        for file in glob.glob('{0}/Prep/*flc.fits'.format(root_i)):
            prep.update_wcs_fits_log(file,
                                     wcs_ref,
                                     xyscale=xyscale,
                                     initialize=False,
                                     replace=('.fits', '.wcslog.fits'),
                                     wcsname='FINE')

            updatehdr.updatewcs_with_shift(file,
                                           wcs_ref_file,
                                           xsh=out_shift[0],
                                           ysh=out_shift[1],
                                           rot=out_rot,
                                           scale=out_scale,
                                           wcsname='FINE',
                                           force=True,
                                           reusename=True,
                                           verbose=True,
                                           sciext='SCI')

            ### Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
            ### keywords
            im = pyfits.open(file, mode='update')
            im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
            im.flush()

    ## Fields with catalogs
    visit_files = glob.glob('j*/Prep/*f814w*visits.npy')
    visit_files.sort()

    ## Mosaic now that they're aligned

    # Shift parameters
    if False:
        os.system(
            'aws s3 sync --exclude "*" --include "{0}*/Prep/*expflag*" --include "{0}*/Prep/*fail*" --include "{0}*/Prep/*cat.fits" --include "{0}*/Prep/*visits.npy" --include "{0}*/Prep/*[._]wcs*" --include "{0}*/Prep/*shifts.*" --include "{0}*/Prep/*expflag*" s3://grizli-v1/Pipeline/ .'
            .format(root))

        # No catalogs
        os.system(
            'aws s3 sync --exclude "*" --include "{0}*/Prep/*expflag*" --include "{0}*/Prep/*fail*" --include "{0}*/Prep/*visits.npy" --include "{0}*/Prep/*[._]wcs*" --include "{0}*/Prep/*wcs.log" --include "{0}*/Prep/*shifts.*" s3://grizli-v1/Pipeline/ .'
            .format(root))

        # ACS
        os.system(
            'aws s3 sync --exclude "*" --include "{0}*acswfc*/Prep/*expflag*" --include "{0}*acswfc*/Prep/*fail*" --include "{0}*acswfc*/Prep/*visits.npy" --include "{0}*acswfc*/Prep/*[._]wcs*" --include "{0}*acswfc*/Prep/*shifts.*" s3://grizli-v1/Pipeline/ .'
            .format(root))

    # Collect per-visit shift-fit results from the shifts logs
    os.system(
        'echo "# root visit exp xsh ysh rot scl n xrms yrms" > shifts_results.log'
    )
    os.system(
        'grep "_fl"  j*/Prep/*shifts.log | grep -v "\#" | sed "s/:/ /" | sed "s/.Prep./ /g" | sed "s/_shifts.log//" >> shifts_results.log'
    )
    sh = utils.read_catalog('shifts_results.log')
    badsh = (sh['xrms'] > 1) | (sh['yrms'] > 1)  #| (sh['n'] < 10)
    shift_skip_visits = list(np.unique(sh['visit'][badsh]))

    # Exposure-flag (expflag) results
    os.system('echo "group,x,expm,flag" > expflag_results.csv')
    os.system(
        'grep "fits" j*/Prep/*expflag*txt | sed "s/:/,/g" | sed "s/.txt/ /" | sed "s/.Prep//" | sed "s/\//,/g" | sed "s/_raw/_flt/" >> expflag_results.csv'
    )
    expf = utils.read_catalog('expflag_results.csv')
    bad_expf = expf['flag'] != 'NORMAL'
    exp_visits = np.unique(expf['group'][bad_expf])
    exp_visits = []  # disable the expflag-based rejection

    # WCS alignment fits
    os.system('echo "# dir visit x xs ys rot scl rms n" > shift_wcs.log')
    os.system(
        'grep " 0 " j*/Prep/*wcs.log | sed "s/.Prep./ /" | sed "s/_wcs.log://" >> shift_wcs.log'
    )
    shifts = utils.read_catalog('shift_wcs.log')
    skip = (shifts['n'] < 7)
    skip |= (shifts['xs'] == 0.) | (np.abs(shifts['rot']) > 0.2)
    skip_visits = [
        '{0}_{1}'.format('_'.join(d.split('_')[:2]), v)
        for d, v in zip(shifts['dir'][skip], shifts['visit'][skip])
    ]

    skip_visits += list(exp_visits)
    skip_visits += shift_skip_visits
    skip_visits = list(np.unique(skip_visits))

    skip_keys = ['uds-12-bhm']
    skip_keys += ['94s-245.0']  # bad shifts.txt
    skip_keys += ['sn2002zx-']  # bad scale

    visit_files = glob.glob('j*/Prep/*visits.npy')
    #visit_files = glob.glob('*shizels*/Prep/*visits.npy')

    all_visits = []
    all_groups = []
    all_info = []

    bucket = 'grizli-v1'
    bucket = 'grizli-cosmos-v2'

    for file in visit_files:
        root_i = os.path.basename(file).split('_visits')[0]
        assoc = '_'.join(os.path.basename(file).split('_')[:2])

        visits, groups, info = np.load(file, allow_pickle=True)
        info['keep'] = False

        ic = 0

        for v in visits:
            failed = glob.glob('{0}/Prep/{1}*fail*'.format(
                root_i, v['product']))
            if len(failed) > 0:
                print(failed)
                continue

            # A `continue` inside the inner loop wouldn't skip the visit,
            # so track any match with a flag
            skip_this = False
            for k in skip_keys:
                if k in v['product']:
                    print('SKIP', v['product'])
                    skip_this = True
                    break

            if skip_this:
                continue

            v['product'] = assoc + '_' + v['product']
            if v['product'] in skip_visits:
                print('SKIP ', v['product'])
                continue

            v['awspath'] = ['{0}/Pipeline/{1}/Prep'.format(bucket, root_i)
                            ] * len(v['files'])
            all_visits.append(v)
            for f in v['files']:
                info['keep'][info['FILE'] == f] = True

        all_info.append(info[info['keep']])

        for g in groups:
            failed = glob.glob('{0}/Prep/{1}*fail*'.format(
                root_i, g['direct']['product']))
            if len(failed) > 0:
                print(failed)
                continue

            # for ext in ['direct', 'grism']:
            #     g[ext]['product'] = assoc+'_'+g[ext]['product']

            all_groups.append(g)

    import astropy.table
    all_info = astropy.table.vstack(all_info)

    if root == 'j123656p6215':
        out_root = 'gdn-' + root

    elif root == 'j021732m0512':
        out_root = 'uds-' + root
    elif root == 'j141956p5255':
        out_root = 'egs-' + root
    elif root == 'j033236m2748':
        out_root = 'gds-' + root
    elif root == 'j100012p0210':
        out_root = 'cos-' + root
    else:
        out_root = 'xxx-' + root

    np.save('{0}_visits.npy'.format(out_root),
            [all_visits, all_groups, all_info])

    if True:
        os.system(
            'aws s3 cp {0}_visits.npy s3://{1}/Mosaics/ --acl public-read'.
            format(out_root, bucket))

    all_visits, all_groups, all_info = np.load(
        '{0}_visits.npy'.format(out_root), allow_pickle=True)

    ########
    # Remake catalogs and drizzled images
    from drizzlepac.astrodrizzle import AstroDrizzle
    import numpy as np
    import glob
    import os
    from grizli import prep

    all_visits, all_groups, all_info = np.load(
        '{0}_visits.npy'.format(out_root), allow_pickle=True)

    filters = ['f814w', 'f140w', 'f160w', 'f105w', 'f098m', 'f850lp']
    #filters = ['f850lp']
    count, products = 0, []
    for ii, direct in enumerate(all_visits[::-1]):
        filt = direct['product'].split('-')[-1]

        prod_files = glob.glob(direct['product'] + '*sci.fits')

        if (filt not in filters):
            continue
        else:
            products.append(direct['product'])
            if (len(prod_files) > 0):
                print('Skip ', direct['product'])
                continue
            else:
                count += 1
                print('\n\n\n\n===========\n\n', ii, count, direct['product'],
                      '\n\n\n========\n')
                #break

        isACS = '_flc' in direct['files'][0]
        if isACS:
            bits = 64 + 32 + 256
            driz_cr_snr = '3.5 3.0'
            driz_cr_scale = '1.2 0.7'
        else:
            bits = 576 + 256
            driz_cr_snr = '8.0 5.0'
            driz_cr_scale = '2.5 0.7'
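
        # The DQ bits above are treated as good data (e.g. 256 = saturated,
        # 64 = warm pixels; 576 = 512 + 64 includes the WFC3/IR blob flag),
        # and the lower driz_cr_snr thresholds make CR rejection more
        # aggressive for ACS when it's enabled.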

        for aws, file in zip(direct['awspath'], direct['files']):
            if not os.path.exists(file):
                os.system('aws s3 cp s3://{0}/{1} ./'.format(aws, file))

        cr_corr = False  # skip the CR-rejection steps; just combine
        AstroDrizzle(direct['files'],
                     output=direct['product'],
                     clean=True,
                     context=False,
                     preserve=False,
                     skysub=True,
                     driz_separate=cr_corr,
                     driz_sep_wcs=cr_corr,
                     median=cr_corr,
                     blot=cr_corr,
                     driz_cr=cr_corr,
                     driz_cr_corr=cr_corr,
                     driz_cr_snr=driz_cr_snr,
                     driz_cr_scale=driz_cr_scale,
                     driz_combine=True,
                     final_bits=bits,
                     coeffs=True,
                     resetbits=4096 * cr_corr,
                     build=False,
                     final_kernel='point',
                     final_wht_type='IVM')

        # Remake catalog
        cat = prep.make_SEP_catalog(root=direct['product'], threshold=5)
        os.system('rm {0}*_[wbs]?[gt].fits'.format(direct['product']))

        # Remove mosaic
        os.system('rm {0}*_sci.fits'.format(direct['product']))

        # Remove FLC
        if isACS:
            for f in direct['files']:
                os.remove(f)

    # Make mosaics
    kwargs = auto_script.get_yml_parameters()
    mos_args = {
        'mosaic_args': kwargs['mosaic_args'],
        'fix_stars': kwargs['visit_prep_args']['fix_stars'],
        'mask_spikes': kwargs['mask_spikes'],
        'skip_single_optical_visits': kwargs['preprocess_args']['skip_single_optical_visits'],
    }

    #mos_args['mosaic_args']['ir_filters'] = ['F140W']
    #mos_args['mosaic_args']['optical_filters'] = ['F814W']
    mos_args['mosaic_args']['fill_mosaics'] = False
    mos_args['mosaic_args']['half_optical_pixscale'] = True

    mos_args['mosaic_args']['kernel'] = 'square'
    mos_args['mosaic_args']['pixfrac'] = 0.33
    mos_args['mosaic_args']['wcs_params']['pixel_scale'] = 0.1

    if root == 'j123656p6215':
        mos_args['mosaic_args']['kernel'] = 'point'
        mos_args['mosaic_args']['pixfrac'] = 0.33
        mos_args['mosaic_args']['wcs_params']['pixel_scale'] = 0.1

    #mos_args['mosaic_args']['wcs_params']['filters'] = ['F140W']

    os.system('ln -s j*/Prep/*_fl?.fits .')

    auto_script.make_combined_mosaics(out_root, **mos_args)