def drizzle_images(label='macs0647-jd1', ra=101.9822125, dec=70.24326667,
                   pixscale=0.06, size=10, wcs=None, pixfrac=0.8,
                   kernel='square', theta=0, half_optical_pixscale=False,
                   filters=['f160w', 'f814w', 'f140w', 'f125w', 'f105w',
                            'f110w', 'f098m', 'f850lp', 'f775w', 'f606w',
                            'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
                   remove=True, rgb_params=RGB_PARAMS,
                   master='grizli-jan2019',
                   aws_bucket='s3://grizli/CutoutProducts/',
                   scale_ab=21, thumb_height=2.0, sync_fits=True,
                   subtract_median=True, include_saturated=True,
                   include_ir_psf=False):
    """
    label='cp561356'; ra=150.208875; dec=1.850241667; size=40; filters=['f160w','f814w', 'f140w','f125w','f105w','f606w','f475w']
    """
    import glob
    import copy
    import os

    import numpy as np

    import astropy.io.fits as pyfits
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    from drizzlepac.adrizzle import do_driz

    import boto3

    from grizli import prep, utils
    from grizli.pipeline import auto_script

    if isinstance(ra, str):
        coo = SkyCoord('{0} {1}'.format(ra, dec), unit=(u.hour, u.deg))
        ra, dec = coo.ra.value, coo.dec.value

    if label is None:
        try:
            import mastquery.utils
            label = mastquery.utils.radec_to_targname(ra=ra, dec=dec,
                         round_arcsec=(1/15, 1),
                         targstr='j{rah}{ram}{ras}{sign}{ded}{dem}{des}')
        except:
            label = 'grizli-cutout'

    #master = 'cosmos'
    #master = 'grizli-jan2019'
    if master == 'grizli-jan2019':
        parent = 's3://grizli/MosaicTools/'
        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket('grizli')
    elif master == 'cosmos':
        parent = 's3://grizli-preprocess/CosmosMosaic/'
        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket('grizli-preprocess')
    else:
        # Run on local files, e.g., "Prep" directory
        parent = None
        remove = False

    for ext in ['_visits.fits', '_visits.npy', '_filter_groups.npy'][-1:]:
        if (not os.path.exists('{0}{1}'.format(master, ext))) & (parent is not None):
            s3_path = parent.split('/')[-2]
            s3_file = '{0}{1}'.format(master, ext)
            print('{0}{1}'.format(parent, s3_file))
            bkt.download_file(s3_path+'/'+s3_file, s3_file,
                              ExtraArgs={"RequestPayer": "requester"})

            #os.system('aws s3 cp {0}{1}{2} ./'.format(parent, master, ext))

    #tab = utils.read_catalog('{0}_visits.fits'.format(master))
    #all_visits = np.load('{0}_visits.npy'.format(master))[0]
    if parent is not None:
        groups = np.load('{0}_filter_groups.npy'.format(master),
                         allow_pickle=True)[0]
    else:
        # Reformat local visits.npy into a groups file
        groups_files = glob.glob('*filter_groups.npy')

        if len(groups_files) == 0:
            visit_file = glob.glob('*visits.npy')[0]
            # allow_pickle needed to load the object array
            visits, groups, info = np.load(visit_file, allow_pickle=True)
            visit_root = visit_file.split('_visits')[0]

            visit_filters = np.array([v['product'].split('-')[-1]
                                      for v in visits])
            groups = {}
            for filt in np.unique(visit_filters):
                groups[filt] = {}
                groups[filt]['filter'] = filt
                groups[filt]['files'] = []
                groups[filt]['footprints'] = []
                groups[filt]['awspath'] = None

                ix = np.where(visit_filters == filt)[0]
                for i in ix:
                    groups[filt]['files'].extend(visits[i]['files'])
                    groups[filt]['footprints'].extend(visits[i]['footprints'])

            np.save('{0}_filter_groups.npy'.format(visit_root), [groups])
        else:
            groups = np.load(groups_files[0], allow_pickle=True)[0]

    #filters = ['f160w','f814w', 'f110w', 'f098m', 'f140w','f125w','f105w','f606w', 'f475w']

    has_filts = []

    for filt in filters:
        if filt not in groups:
            continue

        visits = [copy.deepcopy(groups[filt])]
        #visits[0]['reference'] = 'CarlosGG/ak03_j1000p0228/Prep/ak03_j1000p0228-f160w_drz_sci.fits'

        visits[0]['product'] = label+'-'+filt

        if wcs is None:
            hdu = utils.make_wcsheader(ra=ra, dec=dec, size=size,
                                       pixscale=pixscale, get_hdu=True,
                                       theta=theta)
            h = hdu.header
        else:
            h = utils.to_header(wcs)

        if (filt[:2] in ['f0', 'f1', 'g1']) | (not half_optical_pixscale):
            #data = hdu.data
            pass
        else:
            # Half-pixel grid for the optical filters: double the array
            # size and reference pixel, halve the CD matrix
            for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
                h[k] *= 2

            h['CRPIX1'] -= 0.5
            h['CRPIX2'] -= 0.5

            for k in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
                h[k] /= 2

            #data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)

        #pyfits.PrimaryHDU(header=h, data=data).writeto('ref.fits', overwrite=True, output_verify='fix')
        #visits[0]['reference'] = 'ref.fits'

        print('\n\n###\nMake filter: {0}'.format(filt))

        if (filt.upper() in ['F105W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
            clean_i = False
        else:
            clean_i = remove

        status = utils.drizzle_from_visit(visits[0], h, pixfrac=pixfrac,
                                          kernel=kernel, clean=clean_i,
                                          include_saturated=include_saturated)

        if status is not None:
            sci, wht, outh = status

            if subtract_median:
                med = np.median(sci[sci != 0])
                print('\n\nMedian {0} = {1:.3f}\n\n'.format(filt, med))
                sci -= med
                outh['IMGMED'] = (med, 'Median subtracted from the image')
            else:
                med = 0.
                outh['IMGMED'] = (med, 'Median subtracted from the image')

            pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, filt),
                           data=sci, header=outh, overwrite=True,
                           output_verify='fix')

            pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, filt),
                           data=wht, header=outh, overwrite=True,
                           output_verify='fix')

            has_filts.append(filt)

            if (filt.upper() in ['F105W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
                from grizli.galfit.psf import DrizzlePSF

                hdu = pyfits.open('{0}-{1}_drz_sci.fits'.format(label, filt),
                                  mode='update')

                flt_files = []  # visits[0]['files']
                for i in range(1, 10000):
                    key = 'FLT{0:05d}'.format(i)
                    if key not in hdu[0].header:
                        break

                    flt_files.append(hdu[0].header[key])

                dp = DrizzlePSF(flt_files=flt_files, driz_hdu=hdu[0])

                psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
                                 dec=dp.driz_wcs.wcs.crval[1],
                                 filter=filt.upper(),
                                 pixfrac=dp.driz_header['PIXFRAC'],
                                 kernel=dp.driz_header['KERNEL'],
                                 wcs_slice=dp.driz_wcs, get_extended=True,
                                 verbose=False, get_weight=False)

                psf[1].header['EXTNAME'] = 'PSF'
                #psf[1].header['EXTVER'] = filt
                hdu.append(psf[1])
                hdu.flush()

                #psf.writeto('{0}-{1}_drz_sci.fits'.format(label, filt),
                #            overwrite=True, output_verify='fix')

        #status = prep.drizzle_overlaps(visits, parse_visits=False, check_overlaps=True, pixfrac=pixfrac, skysub=False, final_wcs=True, final_wht_type='IVM', static=True, max_files=260, fix_wcs_system=True)
        #
        #if len(glob.glob('{0}-{1}*sci.fits'.format(label, filt))):
        #    has_filts.append(filt)

        if remove:
            os.system('rm *_fl*fits')

    if len(has_filts) == 0:
        return []

    if rgb_params:
        #auto_script.field_rgb(root=label, HOME_PATH=None, filters=has_filts, **rgb_params)
        show_all_thumbnails(label=label, thumb_height=thumb_height,
                            scale_ab=scale_ab, close=True,
                            rgb_params=rgb_params)

    if aws_bucket:
        #aws_bucket = 's3://grizli-cosmos/CutoutProducts/'
        #aws_bucket = 's3://grizli/CutoutProducts/'
        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket(aws_bucket.split("/")[2])
        aws_path = '/'.join(aws_bucket.split("/")[3:])

        if sync_fits:
            files = glob.glob('{0}*'.format(label))
        else:
            files = glob.glob('{0}*png'.format(label))

        for file in files:
            print('{0} -> {1}'.format(file, aws_bucket))
            bkt.upload_file(file, '{0}/{1}'.format(aws_path, file).replace('//', '/'),
                            ExtraArgs={'ACL': 'public-read'})

        #os.system('aws s3 sync --exclude "*" --include "{0}*" ./ {1} --acl public-read'.format(label, aws_bucket))
        #os.system("""echo "<pre>" > index.html; aws s3 ls AWSBUCKETX --human-readable | sort -k 1 -k 2 | grep -v index | awk '{printf("%s %s",$1, $2); printf(" %6s %s ", $3, $4); print "<a href="$5">"$5"</a>"}'>> index.html; aws s3 cp index.html AWSBUCKETX --acl public-read""".replace('AWSBUCKETX', aws_bucket))

    return has_filts
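# Example usage (sketch; coordinates and label are illustrative, not from a
# real field): make 8-arcsec cutouts in two filters from the
# 'grizli-jan2019' master lists, skipping the RGB thumbnails and the S3
# upload.  The function returns the list of filters that produced a cutout.
#
# >>> filts = drizzle_images(label='example-cutout', ra=150.1, dec=2.2,
# ...                        size=8, filters=['f160w', 'f814w'],
# ...                        rgb_params=None, aws_bucket=None)
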
def drizzle_images(label='macs0647-jd1', ra=101.9822125, dec=70.24326667,
                   pixscale=0.1, size=10, wcs=None, pixfrac=0.33,
                   kernel='square', theta=0, half_optical_pixscale=True,
                   filters=['f160w', 'f140w', 'f125w', 'f105w', 'f110w',
                            'f098m', 'f850lp', 'f814w', 'f775w', 'f606w',
                            'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
                   skip=None, remove=True, rgb_params=RGB_PARAMS,
                   master='grizli-jan2019',
                   aws_bucket='s3://grizli/CutoutProducts/',
                   scale_ab=21, thumb_height=2.0, sync_fits=True,
                   subtract_median=True, include_saturated=True,
                   include_ir_psf=False,
                   show_filters=['visb', 'visr', 'y', 'j', 'h'],
                   combine_similar_filters=True, single_output=True,
                   aws_prep_dir=None, make_segmentation_figure=False,
                   get_dict=False, dryrun=False, **kwargs):
    """
    label='cp561356'; ra=150.208875; dec=1.850241667; size=40; filters=['f160w','f814w', 'f140w','f125w','f105w','f606w','f475w']

    master: Sets of large lists of available exposures

        'cosmos': deprecated
        'grizli-cosmos-v2': All imaging covering the COSMOS field
        'candels-july2019': CANDELS fields other than COSMOS
        'grizli-v1': First processing of the Grizli CHArGE dataset
        'grizli-v1-19.12.04': Updated CHArGE fields
        'grizli-v1-19.12.05': Updated CHArGE fields
    """
    import inspect
    import glob
    import copy
    import os

    import numpy as np

    import astropy.io.fits as pyfits
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    from drizzlepac.adrizzle import do_driz

    import boto3

    from grizli import prep, utils
    from grizli.pipeline import auto_script

    # Function arguments
    if get_dict:
        frame = inspect.currentframe()
        args = inspect.getargvalues(frame).locals

        pop_args = ['get_dict', 'frame', 'kwargs']
        pop_classes = (np.__class__, do_driz.__class__, SkyCoord.__class__)

        for k in kwargs:
            args[k] = kwargs[k]

        for k in args:
            if isinstance(args[k], pop_classes):
                pop_args.append(k)

        for k in pop_args:
            if k in args:
                args.pop(k)

        return args

    # Boto objects
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')

    if isinstance(ra, str):
        coo = SkyCoord('{0} {1}'.format(ra, dec), unit=(u.hour, u.deg))
        ra, dec = coo.ra.value, coo.dec.value

    if label is None:
        try:
            import mastquery.utils
            label = mastquery.utils.radec_to_targname(ra=ra, dec=dec,
                         round_arcsec=(1/15, 1),
                         targstr='j{rah}{ram}{ras}{sign}{ded}{dem}{des}')
        except:
            label = 'grizli-cutout'

    #master = 'cosmos'
    #master = 'grizli-jan2019'
    if master == 'grizli-jan2019':
        parent = 's3://grizli/MosaicTools/'
        bkt = s3.Bucket('grizli')
    elif master == 'cosmos':
        parent = 's3://grizli-preprocess/CosmosMosaic/'
        bkt = s3.Bucket('grizli-preprocess')
    elif master == 'grizli-cosmos-v2':
        parent = 's3://grizli-cosmos-v2/Mosaics/'
        bkt = s3.Bucket('grizli-cosmos-v2')
    elif master == 'candels-july2019':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    elif master == 'grizli-v1-19.12.04':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    elif master == 'grizli-v1-19.12.05':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    else:
        # Run on local files, e.g., "Prep" directory
        parent = None
        bkt = None
        #remove = False

    # Download summary files from S3
    for ext in ['_visits.fits', '_visits.npy', '_filter_groups.npy'][-1:]:
        newfile = '{0}{1}'.format(master, ext)
        if (not os.path.exists(newfile)) & (parent is not None):
            s3_path = parent.split('/')[-2]
            s3_file = '{0}{1}'.format(master, ext)
            print('{0}{1}'.format(parent, s3_file))
            bkt.download_file(s3_path+'/'+s3_file, s3_file,
                              ExtraArgs={"RequestPayer": "requester"})

            #os.system('aws s3 cp {0}{1}{2} ./'.format(parent, master, ext))

    #tab = utils.read_catalog('{0}_visits.fits'.format(master))
    #all_visits = np.load('{0}_visits.npy'.format(master))[0]
    if parent is not None:
        groups = np.load('{0}_filter_groups.npy'.format(master),
                         allow_pickle=True)[0]
    else:
        if aws_prep_dir is not None:
            spl = aws_prep_dir.replace('s3://', '').split('/')
            prep_bucket = spl[0]
            prep_root = spl[2]

            prep_bkt = s3.Bucket(prep_bucket)

            s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
            s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
            s3_file = '{0}_visits.npy'.format(prep_root)

            # Make output path Prep/../Thumbnails/
            if aws_bucket is not None:
                aws_bucket = ('s3://' +
                              s3_full_path.replace('/Prep/', '/Thumbnails/'))

            print('{0}{1}'.format(s3_prep_path, s3_file))
            if not os.path.exists(s3_file):
                prep_bkt.download_file(os.path.join(s3_prep_path, s3_file),
                                s3_file,
                                ExtraArgs={"RequestPayer": "requester"})

            groups_files = glob.glob('{0}_filter_groups.npy'.format(prep_root))
            visit_query = prep_root+'_'
        else:
            groups_files = glob.glob('*filter_groups.npy')
            visit_query = '*'

        # Reformat local visits.npy into a groups file
        if (len(groups_files) == 0):
            visit_file = glob.glob(visit_query+'visits.npy')[0]
            visits, groups, info = np.load(visit_file, allow_pickle=True)
            visit_root = visit_file.split('_visits')[0]

            visit_filters = np.array([v['product'].split('-')[-1]
                                      for v in visits])
            groups = {}
            for filt in np.unique(visit_filters):
                groups[filt] = {}
                groups[filt]['filter'] = filt
                groups[filt]['files'] = []
                groups[filt]['footprints'] = []
                groups[filt]['awspath'] = []

                ix = np.where(visit_filters == filt)[0]
                for i in ix:
                    groups[filt]['files'].extend(visits[i]['files'])
                    groups[filt]['footprints'].extend(visits[i]['footprints'])

                Nf = len(groups[filt]['files'])
                print('{0:>6}: {1:>3} exposures'.format(filt, Nf))

                if aws_prep_dir is not None:
                    groups[filt]['awspath'] = [s3_full_path
                                               for file in range(Nf)]

            np.save('{0}_filter_groups.npy'.format(visit_root), [groups])

        else:
            print('Use groups file: {0}'.format(groups_files[0]))
            groups = np.load(groups_files[0], allow_pickle=True)[0]

    #filters = ['f160w','f814w', 'f110w', 'f098m', 'f140w','f125w','f105w','f606w', 'f475w']

    filt_dict = FilterDict()
    filt_dict.meta['label'] = label
    filt_dict.meta['ra'] = ra
    filt_dict.meta['dec'] = dec
    filt_dict.meta['size'] = size
    filt_dict.meta['master'] = master
    filt_dict.meta['parent'] = parent

    if filters is None:
        filters = list(groups.keys())

    has_filts = []
    lower_filters = [f.lower() for f in filters]
    for filt in lower_filters:
        if filt not in groups:
            continue

        visits = [copy.deepcopy(groups[filt])]
        #visits[0]['reference'] = 'CarlosGG/ak03_j1000p0228/Prep/ak03_j1000p0228-f160w_drz_sci.fits'

        visits[0]['product'] = label+'-'+filt

        if wcs is None:
            hdu = utils.make_wcsheader(ra=ra, dec=dec, size=size,
                                       pixscale=pixscale, get_hdu=True,
                                       theta=theta)
            h = hdu.header
        else:
            h = utils.to_header(wcs)

        if (filt[:2] in ['f0', 'f1', 'g1']) | (not half_optical_pixscale):
            #data = hdu.data
            pass
        else:
            # Half-pixel grid for the optical filters
            for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
                h[k] *= 2

            h['CRPIX1'] -= 0.5
            h['CRPIX2'] -= 0.5

            for k in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
                if k in h:
                    h[k] /= 2

            #data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)

        #pyfits.PrimaryHDU(header=h, data=data).writeto('ref.fits', overwrite=True, output_verify='fix')
        #visits[0]['reference'] = 'ref.fits'

        print('\n\n###\nMake filter: {0}'.format(filt))

        if (filt.upper() in ['F105W', 'F110W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
            clean_i = False
        else:
            clean_i = remove

        status = utils.drizzle_from_visit(visits[0], h, pixfrac=pixfrac,
                                          kernel=kernel, clean=clean_i,
                                          include_saturated=include_saturated,
                                          skip=skip, dryrun=dryrun)

        if dryrun:
            filt_dict[filt] = status
            continue

        elif status is not None:
            sci, wht, outh, filt_dict[filt] = status

            if subtract_median:
                #med = np.median(sci[sci != 0])
                try:
                    un_data = np.unique(sci[(sci != 0) & np.isfinite(sci)])
                    med = utils.mode_statistic(un_data)
                except:
                    med = 0.

                if not np.isfinite(med):
                    med = 0.

                print('\n\nMedian {0} = {1:.3f}\n\n'.format(filt, med))
                sci -= med
                outh['IMGMED'] = (med, 'Median subtracted from the image')
            else:
                med = 0.
                outh['IMGMED'] = (0., 'Median subtracted from the image')

            pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, filt),
                           data=sci, header=outh, overwrite=True,
                           output_verify='fix')

            pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, filt),
                           data=wht, header=outh, overwrite=True,
                           output_verify='fix')

            has_filts.append(filt)

            if (filt.upper() in ['F105W', 'F110W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
                from grizli.galfit.psf import DrizzlePSF

                hdu = pyfits.open('{0}-{1}_drz_sci.fits'.format(label, filt),
                                  mode='update')

                flt_files = []  # visits[0]['files']
                for i in range(1, 10000):
                    key = 'FLT{0:05d}'.format(i)
                    if key not in hdu[0].header:
                        break

                    flt_files.append(hdu[0].header[key])

                try:
                    dp = DrizzlePSF(flt_files=flt_files, driz_hdu=hdu[0])

                    psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
                                     dec=dp.driz_wcs.wcs.crval[1],
                                     filter=filt.upper(),
                                     pixfrac=dp.driz_header['PIXFRAC'],
                                     kernel=dp.driz_header['KERNEL'],
                                     wcs_slice=dp.driz_wcs,
                                     get_extended=True,
                                     verbose=False, get_weight=False)

                    psf[1].header['EXTNAME'] = 'PSF'
                    #psf[1].header['EXTVER'] = filt
                    hdu.append(psf[1])
                    hdu.flush()
                except:
                    pass

        if remove:
            os.system('rm *_fl*fits')

    # Dry run, just return dictionary of the found exposure files
    if dryrun:
        return filt_dict

    # Nothing found
    if len(has_filts) == 0:
        return []

    if combine_similar_filters:
        combine_filters(label=label)

    if rgb_params:
        #auto_script.field_rgb(root=label, HOME_PATH=None, filters=has_filts, **rgb_params)
        show_all_thumbnails(label=label, thumb_height=thumb_height,
                            scale_ab=scale_ab, close=True,
                            rgb_params=rgb_params, filters=show_filters)

    if (single_output != 0):
        # Concatenate into a single FITS file
        files = glob.glob('{0}-f*_dr[cz]_sci.fits'.format(label))
        files.sort()

        if combine_similar_filters:
            comb_files = glob.glob('{0}-[a-eg-z]*_dr[cz]_sci.fits'.format(label))
            comb_files.sort()
            files += comb_files

        hdul = None
        for file in files:
            hdu_i = pyfits.open(file)
            hdu_i[0].header['EXTNAME'] = 'SCI'
            if 'NCOMBINE' in hdu_i[0].header:
                if hdu_i[0].header['NCOMBINE'] <= single_output:
                    continue

                filt_i = file.split('-')[-1].split('_dr')[0]
            else:
                filt_i = utils.get_hst_filter(hdu_i[0].header)

            for h in hdu_i:
                h.header['EXTVER'] = filt_i
                if hdul is None:
                    hdul = pyfits.HDUList([h])
                else:
                    hdul.append(h)

            print('Add to {0}.thumb.fits: {1}'.format(label, file))

            # Weight
            hdu_i = pyfits.open(file.replace('_sci', '_wht'))
            hdu_i[0].header['EXTNAME'] = 'WHT'
            for h in hdu_i:
                h.header['EXTVER'] = filt_i
                if hdul is None:
                    hdul = pyfits.HDUList([h])
                else:
                    hdul.append(h)

        hdul.writeto('{0}.thumb.fits'.format(label), overwrite=True,
                     output_verify='fix')

        for file in files:
            for f in [file, file.replace('_sci', '_wht')]:
                if os.path.exists(f):
                    print('Remove {0}'.format(f))
                    os.remove(f)

    # Segmentation figure
    thumb_file = '{0}.thumb.fits'.format(label)
    if (make_segmentation_figure) & (os.path.exists(thumb_file)) & (aws_prep_dir is not None):
        print('Make segmentation figure')

        # Fetch segmentation image and catalog
        s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
        s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
        s3_file = '{0}_visits.npy'.format(prep_root)

        has_seg_files = True
        seg_files = ['{0}-ir_seg.fits.gz'.format(prep_root),
                     '{0}_phot.fits'.format(prep_root)]

        for s3_file in seg_files:
            if not os.path.exists(s3_file):
                remote_file = os.path.join(s3_prep_path, s3_file)
                try:
                    print('Fetch {0}'.format(remote_file))
                    prep_bkt.download_file(remote_file, s3_file,
                                    ExtraArgs={"RequestPayer": "requester"})
                except:
                    has_seg_files = False
                    print('Make segmentation figure failed: {0}'.format(remote_file))
                    break

        if has_seg_files:
            s3_cat = utils.read_catalog(seg_files[1])
            segmentation_figure(label, s3_cat, seg_files[0])

    if aws_bucket:
        #aws_bucket = 's3://grizli-cosmos/CutoutProducts/'
        #aws_bucket = 's3://grizli/CutoutProducts/'
        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket(aws_bucket.split("/")[2])
        aws_path = '/'.join(aws_bucket.split("/")[3:])

        if sync_fits:
            files = glob.glob('{0}*'.format(label))
        else:
            files = glob.glob('{0}*png'.format(label))

        for file in files:
            print('{0} -> {1}'.format(file, aws_bucket))
            bkt.upload_file(file, '{0}/{1}'.format(aws_path, file).replace('//', '/'),
                            ExtraArgs={'ACL': 'public-read'})

        #os.system('aws s3 sync --exclude "*" --include "{0}*" ./ {1} --acl public-read'.format(label, aws_bucket))
        #os.system("""echo "<pre>" > index.html; aws s3 ls AWSBUCKETX --human-readable | sort -k 1 -k 2 | grep -v index | awk '{printf("%s %s",$1, $2); printf(" %6s %s ", $3, $4); print "<a href="$5">"$5"</a>"}'>> index.html; aws s3 cp index.html AWSBUCKETX --acl public-read""".replace('AWSBUCKETX', aws_bucket))

    return has_filts
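# Example of the newer keywords (sketch; coordinates illustrative):
# `get_dict=True` returns the function's effective arguments without doing
# any work, and `dryrun=True` only queries which exposures overlap the
# cutout, returning the `FilterDict`.
#
# >>> args = drizzle_images(get_dict=True)
# >>> args['pixscale'], args['half_optical_pixscale']
# (0.1, True)
# >>> exposures = drizzle_images(label='example', ra=34.35, dec=-5.25,
# ...                            size=6, dryrun=True, aws_bucket=None)
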
def resample_array(img, wht=None, pixratio=2, slice_if_int=True,
                   int_tol=1.e-3, method='drizzle',
                   drizzle_kwargs=DRIZZLE_KWARGS,
                   rescale_kwargs=RESCALE_KWARGS,
                   scale_by_area=False, verbose=False,
                   blot_stepsize=-1, **kwargs):
    """
    Resample an image to a new grid.  If `pixratio` is an integer, just
    return a slice of the input `img`.  Otherwise, resample with
    `~drizzlepac` (drizzle / blot) or `~skimage.transform.rescale`.
    """
    from grizli.utils import (make_wcsheader, drizzle_array_groups,
                              blot_nearest_exact)
    from skimage.transform import rescale, resize, downscale_local_mean

    is_int = np.isclose(pixratio, np.round(pixratio), atol=int_tol)

    if is_int & (pixratio > 1):
        # Integer scaling
        step = int(np.round(pixratio))

        if method.lower() == 'drizzle':
            _, win = make_wcsheader(ra=90, dec=0, size=img.shape,
                                    pixscale=1., get_hdu=False, theta=0.)

            _, wout = make_wcsheader(ra=90, dec=0, size=img.shape,
                                     pixscale=pixratio, get_hdu=False,
                                     theta=0.)

            if wht is None:
                wht = np.ones_like(img)

            _drz = drizzle_array_groups([img], [wht], [win],
                                        outputwcs=wout, **drizzle_kwargs)

            res = _drz[0]
            res_wht = _drz[1]
            method_used = 'drizzle'

        elif slice_if_int:
            # Simple slice
            res = img[step // 2::step, step // 2::step] * 1
            res_wht = np.ones_like(res)
            method_used = 'slice'

        else:
            # skimage downscale with averaging
            res = downscale_local_mean(img, (step, step), cval=0, clip=True)
            res_wht = np.ones_like(res)
            method_used = 'downscale'

    else:
        if method.lower() == 'drizzle':
            # Drizzle
            _, win = make_wcsheader(ra=90, dec=0, size=img.shape,
                                    pixscale=1., get_hdu=False, theta=0.)

            _, wout = make_wcsheader(ra=90, dec=0, size=img.shape,
                                     pixscale=pixratio, get_hdu=False,
                                     theta=0.)

            if wht is None:
                wht = np.ones_like(img)

            _drz = drizzle_array_groups([img], [wht], [win],
                                        outputwcs=wout, **drizzle_kwargs)

            res = _drz[0]
            res_wht = _drz[1]
            method_used = 'drizzle'

        elif method.lower() == 'blot':
            # Blot exact values
            _, win = make_wcsheader(ra=90, dec=0, size=img.shape,
                                    pixscale=1., get_hdu=False, theta=0.)

            _, wout = make_wcsheader(ra=90, dec=0, size=img.shape,
                                     pixscale=pixratio, get_hdu=False,
                                     theta=0.)

            # Ones for behaviour around zeros
            res = blot_nearest_exact(img + 1, win, wout, verbose=False,
                                     stepsize=blot_stepsize,
                                     scale_by_pixel_area=False,
                                     wcs_mask=False, fill_value=0) - 1

            res_wht = np.ones_like(res)
            method_used = 'blot'

        elif method.lower() == 'rescale':
            res = rescale(img, 1. / pixratio, **rescale_kwargs)
            res_wht = np.ones_like(res)
            method_used = 'rescale'

        else:
            raise ValueError("method must be 'drizzle', 'blot' or 'rescale'.")

    if scale_by_area:
        scale = 1. / pixratio**2
    else:
        scale = 1

    if verbose:
        msg = 'resample_array x {4:.1f}: {0} > {1}, method={2}, scale={3:.2f}'
        print(msg.format(img.shape, res.shape, method_used, scale, pixratio))

    if not np.isclose(scale, 1, 1.e-4):
        res = res * scale
        res_wht = res_wht / scale**2

    #print(res_wht, res_wht.dtype, scale, res_wht.shape)
    #res_wht /= scale**2

    return res, res_wht
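# Minimal self-test for `resample_array` (a sketch; not part of the
# pipeline).  'drizzle' requires `drizzlepac`, 'rescale' requires
# `scikit-image`.  Note that for integer ratios the non-drizzle methods
# fall back to simple slicing when `slice_if_int=True`.
def _demo_resample_array():
    import numpy as np

    # Delta-function test image
    img = np.zeros((64, 64), dtype=np.float32)
    img[32, 32] = 1.0

    # Integer ratio: drizzle resamples, other methods take a slice
    res, res_wht = resample_array(img, pixratio=2, method='drizzle',
                                  verbose=True)
    print(res.shape, float(res.sum()))

    # Non-integer ratio: exercise the full resampling paths
    for method in ['drizzle', 'blot', 'rescale']:
        res, res_wht = resample_array(img, pixratio=1.5, method=method,
                                      verbose=True)
        print(method, res.shape)
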
def irac_mosaics(root='j000308m3303', home='/GrizliImaging/',
                 pixfrac=0.2, kernel='square', initial_pix=1.0,
                 final_pix=0.5, pulldown_mag=15.2, sync_xbcd=True,
                 skip_fetch=False, radec=None, mosaic_pad=2.5,
                 drizzle_ref_file='', run_alignment=True,
                 assume_close=True, bucket='grizli-v1',
                 aor_query='r*', mips_ext='[_e]bcd.fits',
                 channels=['ch1', 'ch2', 'ch3', 'ch4', 'mips1'],
                 drz_query='r*', sync_results=True, ref_seg=None,
                 min_frame={'irac': 5, 'mips': 1.0},
                 med_max_size=500e6, stop_at='', make_psf=True,
                 **kwargs):
    """
    stop_at: preprocess, make_compact
    """
    from grizli import utils

    from . import irac
    from .utils import get_wcslist, fetch_irac

    PATH = os.path.join(home, root)
    try:
        os.mkdir(PATH)
    except:
        pass

    os.chdir(PATH)

    if not skip_fetch:
        # Fetch IRAC bcds
        if not os.path.exists(f'{root}_ipac.fits'):
            os.system(f'wget https://s3.amazonaws.com/{bucket}/IRAC/{root}_ipac.fits')

        res = fetch_irac(root=root, path='./', channels=channels)

        if res in [False, None]:
            # Nothing to do
            make_html(root, bucket=bucket)

            print(f'### Done: \n https://s3.amazonaws.com/{bucket}/Pipeline/{root}/IRAC/{root}.irac.html')

            utils.log_comment(f'/tmp/{root}.success', 'Done!',
                              verbose=True, show_date=True)
            return True

    # Sync CHArGE HST images
    os.system(f'aws s3 sync s3://{bucket}/Pipeline/{root}/Prep/ ./ '
              f' --exclude "*" --include "{root}*seg.fits*"'
              f' --include "{root}-ir_drz*fits*"'
              f' --include "{root}*psf.fits*"'
              f' --include "{root}-f[01]*_drz*fits.gz"'
              f' --include "{root}*phot.fits"')

    # Drizzle properties of the preliminary mosaic
    #pixfrac, pix, kernel = 0.2, 1.0, 'square'

    # Define an output WCS aligned in pixel phase to the HST mosaic
    if not os.path.exists('ref_hdu.fits'):
        wcslist = get_wcslist(skip=-500)
        out_hdu = utils.make_maximal_wcs(wcslist, pixel_scale=initial_pix,
                                         theta=0, pad=5, get_hdu=True,
                                         verbose=True)

        # Make sure pixels align
        ref_file = glob.glob('{0}-f[01]*_drz_sci.fits*'.format(root))
        if len(ref_file) == 0:
            os.system(f'aws s3 sync s3://{bucket}/Pipeline/{root}/Prep/ ./ '
                      f' --exclude "*"'
                      f' --include "{root}-f[678]*_dr*fits.gz"')

            ref_file = glob.glob('{0}-f[678]*_dr*_sci.fits*'.format(root))

        ref_file = ref_file[-1]

        print(f'\nHST reference image: {ref_file}\n')

        ref_hdu = pyfits.open(ref_file)[0].header
        ref_filter = utils.get_hst_filter(ref_hdu).lower()

        ref_wcs = pywcs.WCS(ref_hdu)
        ref_rd = ref_wcs.all_pix2world(np.array([[-0.5, -0.5]]), 0).flatten()
        target_phase = np.array([0.5, 0.5])  # /(pix/0.1)
        for k in ['RADESYS', 'LATPOLE', 'LONPOLE']:
            out_hdu.header[k] = ref_hdu[k]

        # Shift CRVAL to same tangent point
        out_wcs = pywcs.WCS(out_hdu.header)
        out_xy = out_wcs.all_world2pix(np.array([ref_wcs.wcs.crval]), 1).flatten()
        out_hdu.header['CRVAL1'], out_hdu.header['CRVAL2'] = tuple(ref_wcs.wcs.crval)
        out_hdu.header['CRPIX1'], out_hdu.header['CRPIX2'] = tuple(out_xy)

        # Align integer pixel phase
        out_wcs = pywcs.WCS(out_hdu.header)
        out_xy = out_wcs.all_world2pix(np.array([ref_rd]), 0).flatten()
        xy_phase = out_xy - np.floor(out_xy)
        new_crpix = out_wcs.wcs.crpix - (xy_phase - target_phase)
        out_hdu.header['CRPIX1'], out_hdu.header['CRPIX2'] = tuple(new_crpix)
        out_wcs = pywcs.WCS(out_hdu.header)

        out_hdu.writeto('ref_hdu.fits', output_verify='Fix')

    else:
        out_hdu = pyfits.open('ref_hdu.fits')[1]

    ########
    files = []
    for ch in channels:
        if 'mips' in ch:
            mc = ch.replace('mips', 'ch')
            files += glob.glob(f'{aor_query}/{mc}/bcd/SPITZER_M*{mips_ext}')
            files += glob.glob(f'{aor_query}/{mc}/bcd/SPITZER_M*xbcd.fits.gz')
        else:
            files += glob.glob(f'{aor_query}/{ch}/bcd/SPITZER_I*cbcd.fits')
            files += glob.glob(f'{aor_query}/{ch}/bcd/SPITZER_I*xbcd.fits.gz')

    files.sort()

    roots = np.array([file.split('/')[0] for file in files])
    with_channels = np.array([file.split('_')[1] for file in files])
    all_roots = np.array(['{0}-{1}'.format(r, c.replace('I', 'ch').replace('M', 'mips'))
                          for r, c in zip(roots, with_channels)])

    tab = {'aor': [], 'N': [], 'channel': []}
    for r in np.unique(all_roots):
        tab['aor'].append(r.split('-')[0])
        tab['N'].append((all_roots == r).sum())
        tab['channel'].append(r.split('-')[1])

    aors = utils.GTable(tab)
    print(aors)

    ########
    SKIP = True           # Don't regenerate finished files
    delete_group = False  # Delete intermediate products from memory
    zip_outputs = False   # GZip intermediate products

    aors_ch = {}

    ########
    # Process mosaics by AOR
    # Process in groups, helps for fields like HFF with dozens/hundreds of AORs!
    for ch in channels:
        aor = aors[(aors['channel'] == ch) & (aors['N'] > 5)]
        if len(aor) == 0:
            continue

        #aors_ch[ch] = []

        if ch in ['ch1', 'ch2']:
            NPER, instrument = 500, 'irac'
        elif ch in ['ch3', 'ch4']:
            NPER, instrument = 500, 'irac'
        elif ch in ['mips1']:
            NPER, instrument = 400, 'mips'

        min_frametime = min_frame[instrument]

        nsort = np.cumsum(aor['N']/NPER)
        NGROUP = int(np.ceil(nsort.max()))

        count = 0

        for g in range(NGROUP):
            root_i = root+'-{0:02d}'.format(g)

            gsel = (nsort > g) & (nsort <= g+1)
            aor_ids = list(aor['aor'][gsel])
            print('{0}-{1} N_AOR = {2:>2d} N_EXP = {3:>4d}'.format(root_i,
                         ch, len(aor_ids), aor['N'][gsel].sum()))
            count += gsel.sum()

            files = glob.glob('{0}-{1}*'.format(root_i, ch))
            if (len(files) > 0) & (SKIP):
                print('Skip {0}-{1}'.format(root_i, ch))
                continue

            with open('{0}-{1}.log'.format(root_i, ch), 'w') as fp:
                fp.write(time.ctime())

            # Do internal alignment to GAIA.
            # Otherwise, set `radec` to the name of a file that has two
            # columns with reference ra/dec.
            #radec = None

            # Pipeline
            if instrument == 'mips':
                aors_ch[ch] = irac.process_all(channel=ch.replace('mips', 'ch'),
                                  output_root=root_i,
                                  driz_scale=initial_pix, kernel=kernel,
                                  pixfrac=pixfrac, wcslist=None, pad=0,
                                  out_hdu=out_hdu, aor_ids=aor_ids,
                                  flat_background=False, two_pass=True,
                                  min_frametime=min_frametime,
                                  instrument=instrument,
                                  align_threshold=0.15, radec=radec,
                                  run_alignment=False, mips_ext=mips_ext,
                                  ref_seg=ref_seg,
                                  global_mask=root+'_mask.reg')
            else:
                aors_ch[ch] = irac.process_all(channel=ch,
                                  output_root=root_i,
                                  driz_scale=initial_pix, kernel=kernel,
                                  pixfrac=pixfrac, wcslist=None, pad=0,
                                  out_hdu=out_hdu, aor_ids=aor_ids,
                                  flat_background=False, two_pass=True,
                                  min_frametime=min_frametime,
                                  instrument=instrument, radec=radec,
                                  run_alignment=run_alignment,
                                  assume_close=assume_close,
                                  ref_seg=ref_seg,
                                  global_mask=root+'_mask.reg',
                                  med_max_size=med_max_size)

            if len(aors_ch[ch]) == 0:
                continue

            # PSFs
            plt.ioff()

            if (instrument != 'mips') & make_psf:
                ch_num = int(ch[-1])
                segmask = True

                #psf_size = 20
                #for p in [0.1, final_pix]:
                #    irac.mosaic_psf(output_root=root_i, target_pix=p,
                #                    channel=ch_num, aors=aors_ch[ch],
                #                    kernel=kernel, pixfrac=pixfrac,
                #                    size=psf_size,
                #                    native_orientation=False,
                #                    instrument=instrument,
                #                    subtract_background=False,
                #                    segmentation_mask=segmask, max_R=10)
                #    plt.close('all')

                psf_size = 30
                p = 0.1
                irac.mosaic_psf(output_root=root_i, target_pix=p,
                                channel=ch_num, aors=aors_ch[ch],
                                kernel=kernel, pixfrac=pixfrac,
                                size=psf_size, native_orientation=True,
                                subtract_background=False,
                                segmentation_mask=segmask, max_R=10)

                plt.close('all')

            if delete_group:
                del(aors_ch[ch])

            print('Done {0}-{1}, gzip products'.format(root_i, ch))

            if zip_outputs:
                os.system('gzip {0}*-{1}_drz*fits'.format(root_i, ch))

        # PSFs
        if (instrument != 'mips') & make_psf:
            # Average PSF
            p = 0.1
            files = glob.glob('*{0}-{1:.1f}*psfr.fits'.format(ch, p))
            if len(files) == 0:
                continue

            files.sort()
            avg = None
            for file in files:
                im = pyfits.open(file)
                if avg is None:
                    wht = im[0].data != 0
                    avg = im[0].data*wht
                else:
                    wht_i = im[0].data != 0
                    avg += im[0].data*wht_i
                    wht += wht_i

                im.close()

            avg = avg/wht
            avg[wht == 0] = 0

            # Window
            from photutils import (HanningWindow, TukeyWindow,
                                   CosineBellWindow,
                                   SplitCosineBellWindow, TopHatWindow)

            coswindow = CosineBellWindow(alpha=1)
            avg *= coswindow(avg.shape)**0.05
            avg /= avg.sum()

            pyfits.writeto('{0}-{1}-{2:0.1f}.psfr_avg.fits'.format(root, ch, p),
                           data=avg, header=im[0].header, overwrite=True)

    ####
    # Show the initial product
    plt.ioff()
    for i in range(10):
        files = glob.glob(f'{root}-{i:02d}-ch*sci.fits')
        if len(files) > 0:
            break

    files.sort()

    if len(files) == 1:
        subs = 1, 1
        fs = [7, 7]
    elif len(files) == 2:
        subs = 1, 2
        fs = [14, 7]
    elif len(files) == 3:
        subs = 2, 2
        fs = [14, 14]
    else:
        subs = 2, 2
        fs = [14, 14]

    fig = plt.figure(figsize=fs)
    for i, file in enumerate(files[:4]):
        im = pyfits.open(file)
        print('{0} {1} {2:.1f} s'.format(file, im[0].header['FILTER'],
                                         im[0].header['EXPTIME']))
        ax = fig.add_subplot(subs[0], subs[1], 1+i)
        ax.imshow(im[0].data, vmin=-0.1, vmax=1, cmap='gray_r',
                  origin='lower')
        ax.text(0.05, 0.95, file, ha='left', va='top', color='k',
                transform=ax.transAxes)
        im.close()

    if len(files) > 1:
        fig.axes[1].set_yticklabels([])

    if len(files) > 2:
        fig.axes[0].set_xticklabels([])
        fig.axes[1].set_xticklabels([])

    if len(files) > 3:
        fig.axes[3].set_yticklabels([])

    fig.tight_layout(pad=0.5)
    fig.savefig(f'{root}.init.png')
    plt.close('all')

    if stop_at == 'preprocess':
        return True

    #######
    # Make more compact individual exposures and clean directories
    wfiles = []
    for ch in channels:
        if 'mips' in ch:
            chq = ch.replace('mips', 'ch')
            wfiles += glob.glob(f'{aor_query}/{chq}/bcd/SPITZER_M*wcs.fits')
        else:
            wfiles += glob.glob(f'{aor_query}/{ch}/bcd/SPITZER_I*wcs.fits')

    #wfiles = glob.glob('r*/*/bcd/*_I[1-4]_*wcs.fits')
    #wfiles += glob.glob('r*/*/bcd/*_M[1-4]_*wcs.fits')
    wfiles.sort()

    for wcsfile in wfiles:
        outfile = wcsfile.replace('_wcs.fits', '_xbcd.fits.gz')
        if os.path.exists(outfile):
            print(outfile)
        else:
            irac.combine_products(wcsfile)
            print('Run: ', outfile)

        if os.path.exists(outfile):
            remove_files = glob.glob('{0}*fits'.format(wcsfile.split('_wcs')[0]))
            for f in remove_files:
                print('  rm ', f)
                os.remove(f)

    if stop_at == 'make_compact':
        return True

    #############
    # Drizzle final mosaics

    # Make final mosaic a bit bigger than the HST image
    pad = mosaic_pad

    # Pixel scale of the final mosaic.  Don't make it too small if few
    # dithers are available, as in this example.  For well-sampled mosaics
    # like RELICS / HFF, can push this to perhaps 0.3" / pix.
    pixscale = final_pix  # 0.5

    # Again, with many dithers can use more aggressive drizzle parameters,
    # like a 'point' kernel or smaller pixfrac (a 'point' kernel is
    # pixfrac=0)
    #kernel, pixfrac = 'square', 0.2

    # Correction for bad columns near bright stars
    #pulldown_mag = 15.2

    ##############
    # Dilation for CR rejection
    dil = np.ones((3, 3))
    driz_cr = [7, 4]
    blot_interp = 'poly5'
    bright_fmax = 0.5

    ###
    # Drizzle
    for ch in channels:  # [:2]:
        ###########
        # Files and reference image for extra CR rejection
        if ch == 'mips1':
            files = glob.glob('{0}/ch1/bcd/SPITZER_M1_*xbcd.fits*'.format(drz_query))
            files.sort()
            pulldown_mag = -10
            pixscale = 1.
            kernel = 'point'
        else:
            files = glob.glob('{0}/{1}/bcd/*_I?_*xbcd.fits*'.format(drz_query, ch))
            files.sort()

        #ref = pyfits.open('{0}-00-{1}_drz_sci.fits'.format(root, ch))
        #ref_data = ref[0].data.astype(np.float32)

        ref_files = glob.glob(f'{root}-??-{ch}*sci.fits')
        if len(ref_files) == 0:
            continue

        # Weighted combination of the per-group preliminary mosaics
        num = None
        for ref_file in ref_files:
            ref = pyfits.open(ref_file)
            wht = pyfits.open(ref_file.replace('_sci.fits', '_wht.fits'))
            if num is None:
                num = ref[0].data*wht[0].data
                den = wht[0].data
            else:
                num += ref[0].data*wht[0].data
                den += wht[0].data

        ref_data = (num/den).astype(np.float32)
        ref_data[den <= 0] = 0

        ref_wcs = pywcs.WCS(ref[0].header, relax=True)
        ref_wcs.pscale = utils.get_wcs_pscale(ref_wcs)
        if (not hasattr(ref_wcs, '_naxis1')) & hasattr(ref_wcs, '_naxis'):
            ref_wcs._naxis1, ref_wcs._naxis2 = ref_wcs._naxis

        ##############
        # Output WCS based on HST footprint
        if drizzle_ref_file == '':
            try:
                hst_im = pyfits.open(glob.glob('{0}-f[01]*_drz_sci.fits*'.format(root))[-1])
            except:
                hst_im = pyfits.open(glob.glob('{0}-f[578]*_dr*sci.fits*'.format(root))[-1])

            hst_wcs = pywcs.WCS(hst_im[0])
            hst_wcs.pscale = utils.get_wcs_pscale(hst_wcs)

            try:
                size = (np.round(np.array([hst_wcs._naxis1,
                                           hst_wcs._naxis2])*hst_wcs.pscale*pad/pixscale)*pixscale)
            except:
                size = (np.round(np.array([hst_wcs._naxis[0],
                                           hst_wcs._naxis[1]])*hst_wcs.pscale*pad/pixscale)*pixscale)

            hst_rd = hst_wcs.calc_footprint().mean(axis=0)
            _x = utils.make_wcsheader(ra=hst_rd[0], dec=hst_rd[1],
                                      size=size, pixscale=pixscale,
                                      get_hdu=False, theta=0)

            out_header, out_wcs = _x
        else:
            driz_ref_im = pyfits.open(drizzle_ref_file)
            out_wcs = pywcs.WCS(driz_ref_im[0].header, relax=True)
            out_wcs.pscale = utils.get_wcs_pscale(out_wcs)

            out_header = utils.to_header(out_wcs)

        if (not hasattr(out_wcs, '_naxis1')) & hasattr(out_wcs, '_naxis'):
            out_wcs._naxis1, out_wcs._naxis2 = out_wcs._naxis

        ##############
        # Bright stars for pulldown correction
        cat_file = glob.glob(f'{root}-[0-9][0-9]-{ch}.cat.fits')[0]
        ph = utils.read_catalog(cat_file)
        bright = (ph['mag_auto'] < pulldown_mag)  # & (ph['flux_radius'] < 3)
        ph = ph[bright]

        ##############
        # Now do the drizzling
        yp, xp = np.indices((256, 256))
        orig_files = []

        out_header['DRIZ_CR0'] = driz_cr[0]
        out_header['DRIZ_CR1'] = driz_cr[1]
        out_header['KERNEL'] = kernel
        out_header['PIXFRAC'] = pixfrac
        out_header['NDRIZIM'] = 0
        out_header['EXPTIME'] = 0
        out_header['BUNIT'] = 'microJy'
        out_header['FILTER'] = ch

        med_root = 'xxx'
        N = len(files)

        for i, file in enumerate(files):  # [:100]):
            print('{0}/{1} {2}'.format(i, N, file))

            if file in orig_files:
                continue

            im = pyfits.open(file)
            ivar = 1/im['CBUNC'].data**2
            msk = (~np.isfinite(ivar)) | (~np.isfinite(im['CBCD'].data))
            im['CBCD'].data[msk] = 0
            ivar[msk] = 0

            wcs = pywcs.WCS(im['WCS'].header, relax=True)
            wcs.pscale = utils.get_wcs_pscale(wcs)
            if (not hasattr(wcs, '_naxis1')) & hasattr(wcs, '_naxis'):
                wcs._naxis1, wcs._naxis2 = wcs._naxis

            fp = Path(wcs.calc_footprint())

            med_root_i = im.filename().split('/')[0]
            if med_root != med_root_i:
                print('\n Read {0}-{1}_med.fits \n'.format(med_root_i, ch))
                med = pyfits.open('{0}-{1}_med.fits'.format(med_root_i, ch))
                med_data = med[0].data.astype(np.float32)
                med_root = med_root_i
                med.close()

                try:
                    gaia_rd = utils.read_catalog('{0}-{1}_gaia.radec'.format(med_root_i, ch))
                    ii, rr = gaia_rd.match_to_catalog_sky(ph)
                    gaia_rd = gaia_rd[ii][rr.value < 2]
                    gaia_pts = np.array([gaia_rd['ra'].data,
                                         gaia_rd['dec'].data]).T
                except:
                    gaia_rd = []

            #data = im['CBCD'].data - aor_med[0].data

            # Change output units to uJy / pix
            if ch == 'mips1':
                #un = 1*u.MJy/u.sr
                ##to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(out_wcs.pscale**2)
                #to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(native_scale**2)
                to_ujy_px = 146.902690
            else:
                #native_scale = 1.223
                #un = 1*u.MJy/u.sr
                ##to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(out_wcs.pscale**2)
                #to_ujy_px = un.to(u.uJy/u.arcsec**2).value*(native_scale**2)
                to_ujy_px = 35.17517196810

            blot_data = ablot.do_blot(ref_data, ref_wcs, wcs, 1,
                                      coeffs=True, interp=blot_interp,
                                      sinscl=1.0, stepsize=10,
                                      wcsmap=None)/to_ujy_px

            # Mask for bright stars
            eblot = 1-np.clip(blot_data, 0, bright_fmax)/bright_fmax

            # Initial CR
            clean = im[0].data - med_data - im['WCS'].header['PEDESTAL']
            dq = (clean - blot_data)*np.sqrt(ivar)*eblot > driz_cr[0]

            # Adjacent CRs
            dq_dil = binary_dilation(dq, selem=dil)
            dq |= ((clean - blot_data)*np.sqrt(ivar)*eblot > driz_cr[1]) & (dq_dil)

            # Very negative pixels
            dq |= clean*np.sqrt(ivar) < -4

            original_dq = im['WCS'].data - (im['WCS'].data & 1)
            dq |= original_dq > 0

            # Pulldown correction for bright stars
            if len(gaia_rd) > 0:
                mat = fp.contains_points(gaia_pts)
                if mat.sum() > 0:
                    xg, yg = wcs.all_world2pix(gaia_rd['ra'][mat],
                                               gaia_rd['dec'][mat], 0)
                    sh = dq.shape
                    mat = (xg > 0) & (xg < sh[1]) & (yg > 0) & (yg < sh[0])
                    if mat.sum() > 0:
                        for xi, yi in zip(xg[mat], yg[mat]):
                            dq |= (np.abs(xp-xi) < 2) & (np.abs(yp-yi) > 10)

            if i == 0:
                res = utils.drizzle_array_groups([clean],
                              [ivar*(dq == 0)], [wcs],
                              outputwcs=out_wcs, kernel=kernel,
                              pixfrac=pixfrac, data=None, verbose=False)

                # Copy header keywords
                wcs_header = utils.to_header(wcs)
                for k in im[0].header:
                    if (k not in ['', 'HISTORY', 'COMMENT']) & (k not in out_header) & (k not in wcs_header):
                        out_header[k] = im[0].header[k]

            else:
                _ = utils.drizzle_array_groups([clean],
                              [ivar*(dq == 0)], [wcs],
                              outputwcs=out_wcs, kernel=kernel,
                              pixfrac=pixfrac, data=res[:3],
                              verbose=False)

            out_header['NDRIZIM'] += 1
            out_header['EXPTIME'] += im[0].header['EXPTIME']

            im.close()

        # Pixel scale factor for weights
        wht_scale = (out_wcs.pscale/wcs.pscale)**-4

        # Write final images
        pyfits.writeto('{0}-{1}_drz_sci.fits'.format(root, ch),
                       data=res[0]*to_ujy_px, header=out_header,
                       output_verify='fix', overwrite=True)

        pyfits.writeto('{0}-{1}_drz_wht.fits'.format(root, ch),
                       data=res[1]*wht_scale/to_ujy_px**2,
                       header=out_header, output_verify='fix',
                       overwrite=True)

    ##########
    # Show the final drizzled images
    plt.ioff()
    files = glob.glob(f'{root}-ch*sci.fits')
    files.sort()

    if len(files) == 1:
        subs = 1, 1
        fs = [7, 7]
    elif len(files) == 2:
        subs = 1, 2
        fs = [14, 7]
    elif len(files) == 3:
        subs = 2, 2
        fs = [14, 14]
    else:
        subs = 2, 2
        fs = [14, 14]

    fig = plt.figure(figsize=fs)
    for i, file in enumerate(files[:4]):
        im = pyfits.open(file)
        print('{0} {1} {2:.1f} s'.format(file, im[0].header['FILTER'],
                                         im[0].header['EXPTIME']))
        ax = fig.add_subplot(subs[0], subs[1], 1+i)
        scl = (final_pix/initial_pix)**2
        ax.imshow(im[0].data, vmin=-0.1*scl, vmax=1*scl, cmap='gray_r',
                  origin='lower')
        ax.text(0.05, 0.95, file, ha='left', va='top', color='k',
                transform=ax.transAxes)
        im.close()

    if len(files) > 1:
        fig.axes[1].set_yticklabels([])

    if len(files) > 2:
        fig.axes[0].set_xticklabels([])
        fig.axes[1].set_xticklabels([])

    if len(files) > 3:
        fig.axes[3].set_yticklabels([])

    fig.tight_layout(pad=0.5)
    fig.savefig(f'{root}.final.png')
    plt.close('all')

    if sync_results:
        print('gzip mosaics')
        os.system(f'gzip -f {root}-ch*_drz*fits {root}-mips*_drz*fits')

        ########
        # Sync
        print(f's3://{bucket}/Pipeline/{root}/IRAC/')

        make_html(root, bucket=bucket)

        os.system(f'aws s3 sync ./ s3://{bucket}/Pipeline/{root}/IRAC/'
                  f' --exclude "*" --include "{root}-ch*drz*fits*"'
                  f' --include "{root}-mips*drz*fits*"'
                  f' --include "{root}.*png"'
                  ' --include "*-ch*psf*" --include "*log.fits"'
                  ' --include "*wcs.[lp]*"'
                  ' --include "*html" --include "*fail*"'
                  ' --acl public-read')

        if sync_xbcd:
            aor_files = glob.glob('r*-ch*med.fits')
            for aor_file in aor_files:
                aor = aor_file.split('-ch')[0]
                os.system(f'aws s3 sync ./{aor}/ s3://{bucket}/IRAC/AORS/{aor}/ --exclude "*" --include "ch*/bcd/*xbcd.fits.gz" --acl public-read')
                os.system(f'aws s3 cp {aor_file} s3://{bucket}/IRAC/AORS/ --acl public-read')

    msg = f'### Done: \n https://s3.amazonaws.com/{bucket}/Pipeline/{root}/IRAC/{root}.irac.html'
    utils.log_comment(f'/tmp/{root}.success', msg, verbose=True,
                      show_date=True)
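# Example pipeline call (sketch; the field name is illustrative): build
# IRAC ch1/ch2 mosaics for one CHArGE field and stop after the preliminary
# per-AOR mosaics.  Set `radec` to a two-column ra/dec file to align to an
# external reference instead of the internal GAIA alignment.
#
# >>> irac_mosaics(root='j021732m0512', channels=['ch1', 'ch2'],
# ...              initial_pix=1.0, final_pix=0.5,
# ...              stop_at='preprocess')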