Example #1
def drizzle_images(label='macs0647-jd1', ra=101.9822125, dec=70.24326667,
                   pixscale=0.06, size=10, wcs=None, pixfrac=0.8,
                   kernel='square', theta=0, half_optical_pixscale=False,
                   filters=['f160w', 'f814w', 'f140w', 'f125w', 'f105w',
                            'f110w', 'f098m', 'f850lp', 'f775w', 'f606w',
                            'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
                   remove=True, rgb_params=RGB_PARAMS, master='grizli-jan2019',
                   aws_bucket='s3://grizli/CutoutProducts/', scale_ab=21,
                   thumb_height=2.0, sync_fits=True, subtract_median=True,
                   include_saturated=True, include_ir_psf=False):
    """
    label='cp561356'; ra=150.208875; dec=1.850241667; size=40; filters=['f160w','f814w', 'f140w','f125w','f105w','f606w','f475w']
    
    
    """
    import glob
    import copy
    import os

    import numpy as np
    
    import astropy.io.fits as pyfits
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    from drizzlepac.adrizzle import do_driz
    
    import boto3
    
    from grizli import prep, utils
    from grizli.pipeline import auto_script
    
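    # String coordinates are interpreted as sexagesimal (hours, degrees)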
    if isinstance(ra, str):
        coo = SkyCoord('{0} {1}'.format(ra, dec), unit=(u.hour, u.deg))
        ra, dec = coo.ra.value, coo.dec.value
    
    if label is None:
        try:
            import mastquery.utils
            label = mastquery.utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(1/15, 1), targstr='j{rah}{ram}{ras}{sign}{ded}{dem}{des}')
        except:
            label = 'grizli-cutout'
            
    #master = 'cosmos'
    #master = 'grizli-jan2019'
    
    if master == 'grizli-jan2019':
        parent = 's3://grizli/MosaicTools/'

        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket('grizli')
    
    elif master == 'cosmos':
        parent = 's3://grizli-preprocess/CosmosMosaic/'

        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket('grizli-preprocess')
    
    else:
        # Run on local files, e.g., "Prep" directory
        parent = None
        remove = False
        
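    # Fetch the master summary files from S3; the trailing [-1:] means only
    # the '_filter_groups.npy' file is actually downloaded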
    for ext in ['_visits.fits', '_visits.npy', '_filter_groups.npy'][-1:]:

        if (not os.path.exists('{0}{1}'.format(master, ext))) & (parent is not None):
            
            s3_path = parent.split('/')[-2]
            s3_file = '{0}{1}'.format(master, ext)
            print('{0}{1}'.format(parent, s3_file))
            bkt.download_file(s3_path+'/'+s3_file, s3_file,
                              ExtraArgs={"RequestPayer": "requester"})
            
            #os.system('aws s3 cp {0}{1}{2} ./'.format(parent, master, ext))
            
    #tab = utils.read_catalog('{0}_visits.fits'.format(master))
    #all_visits = np.load('{0}_visits.npy'.format(master))[0]
    if parent is not None:
        groups = np.load('{0}_filter_groups.npy'.format(master), allow_pickle=True)[0]
    else:
        # Reformat local visits.npy into a groups file
        groups_files = glob.glob('*filter_groups.npy')
        
        if len(groups_files) == 0:
            visit_file = glob.glob('*visits.npy')[0]
            visits, groups, info = np.load(visit_file, allow_pickle=True)
            visit_root = visit_file.split('_visits')[0]
            
            visit_filters = np.array([v['product'].split('-')[-1] for v in visits])
            groups = {}
            for filt in np.unique(visit_filters):
                groups[filt] = {}
                groups[filt]['filter'] = filt
                groups[filt]['files'] = []
                groups[filt]['footprints'] = []
                groups[filt]['awspath'] = None
                
                ix = np.where(visit_filters == filt)[0]
                for i in ix:
                    groups[filt]['files'].extend(visits[i]['files'])
                    groups[filt]['footprints'].extend(visits[i]['footprints'])
                
            np.save('{0}_filter_groups.npy'.format(visit_root), [groups])
                
        else:
            groups = np.load(groups_files[0], allow_pickle=True)[0]
        
    #filters = ['f160w','f814w', 'f110w', 'f098m', 'f140w','f125w','f105w','f606w', 'f475w']
    
    has_filts = []
    
    for filt in filters:
        if filt not in groups:
            continue
        
        visits = [copy.deepcopy(groups[filt])]
        #visits[0]['reference'] = 'CarlosGG/ak03_j1000p0228/Prep/ak03_j1000p0228-f160w_drz_sci.fits'

        visits[0]['product'] = label+'-'+filt

        if wcs is None:
            hdu = utils.make_wcsheader(ra=ra, dec=dec, size=size, pixscale=pixscale, get_hdu=True, theta=theta)

            h = hdu.header
        else:
            h = utils.to_header(wcs)
            
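        # For optical filters with half_optical_pixscale, resample to half the
        # pixel scale: double NAXIS/CRPIX, halve the CD matrix, and shift
        # CRPIX by 0.5 so CRVAL stays on the same sky position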
        if (filt[:2] in ['f0', 'f1', 'g1']) | (not half_optical_pixscale):
            #data = hdu.data  
            pass
        else:
            for k in ['NAXIS1','NAXIS2','CRPIX1','CRPIX2']:
                h[k] *= 2

            h['CRPIX1'] -= 0.5
            h['CRPIX2'] -= 0.5

            for k in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
                if k in h:
                    h[k] /= 2

            #data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)
                        
        #pyfits.PrimaryHDU(header=h, data=data).writeto('ref.fits', overwrite=True, output_verify='fix')
        #visits[0]['reference'] = 'ref.fits'
        
        print('\n\n###\nMake filter: {0}'.format(filt))

        if (filt.upper() in ['F105W','F125W','F140W','F160W']) & include_ir_psf:
            clean_i = False
        else:
            clean_i = remove
            
        status = utils.drizzle_from_visit(visits[0], h, pixfrac=pixfrac, kernel=kernel, clean=clean_i, include_saturated=include_saturated)
        
        if status is not None:
            sci, wht, outh = status
            
            if subtract_median:
                med = np.median(sci[sci != 0])
                print('\n\nMedian {0} = {1:.3f}\n\n'.format(filt, med))
                sci -= med
                outh['IMGMED'] = (med, 'Median subtracted from the image')
            else:
                med = 0.
                outh['IMGMED'] = (med, 'Median subtracted from the image')
                
            pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, filt), 
                           data=sci, header=outh, overwrite=True, 
                           output_verify='fix')
            
            pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, filt), 
                           data=wht, header=outh, overwrite=True, 
                           output_verify='fix')
            
            has_filts.append(filt)
            
            if (filt.upper() in ['F105W','F125W','F140W','F160W']) & include_ir_psf:
                from grizli.galfit.psf import DrizzlePSF
                
                hdu = pyfits.open('{0}-{1}_drz_sci.fits'.format(label, filt),
                                  mode='update') 
                
                flt_files = [] #visits[0]['files']
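                # The drizzled header records the contributing exposures in
                # sequential FLT00001, FLT00002, ... keywords; collect them
                # until the sequence ends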
                for i in range(1, 10000):
                    key = 'FLT{0:05d}'.format(i)
                    if key not in hdu[0].header:
                        break
                    
                    flt_files.append(hdu[0].header[key])
                        
                dp = DrizzlePSF(flt_files=flt_files, driz_hdu=hdu[0])
                
                psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
                                 dec=dp.driz_wcs.wcs.crval[1], 
                                 filter=filt.upper(), 
                                 pixfrac=dp.driz_header['PIXFRAC'], 
                                 kernel=dp.driz_header['KERNEL'], 
                                 wcs_slice=dp.driz_wcs, get_extended=True, 
                                 verbose=False, get_weight=False)

                psf[1].header['EXTNAME'] = 'PSF'
                #psf[1].header['EXTVER'] = filt
                hdu.append(psf[1])
                hdu.flush()
                
                #psf.writeto('{0}-{1}_drz_sci.fits'.format(label, filt), 
                #            overwrite=True, output_verify='fix')
                
        #status = prep.drizzle_overlaps(visits, parse_visits=False, check_overlaps=True, pixfrac=pixfrac, skysub=False, final_wcs=True, final_wht_type='IVM', static=True, max_files=260, fix_wcs_system=True)
        # 
        # if len(glob.glob('{0}-{1}*sci.fits'.format(label, filt))):
        #     has_filts.append(filt)
            
        if remove:
            os.system('rm *_fl*fits')
         
    if len(has_filts) == 0:
        return []
    
    if rgb_params:
        #auto_script.field_rgb(root=label, HOME_PATH=None, filters=has_filts, **rgb_params)
        
        show_all_thumbnails(label=label, thumb_height=thumb_height, scale_ab=scale_ab, close=True, rgb_params=rgb_params)
        
    if aws_bucket:   
        #aws_bucket = 's3://grizli-cosmos/CutoutProducts/'
        #aws_bucket = 's3://grizli/CutoutProducts/'
        
        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket(aws_bucket.split("/")[2])
        aws_path = '/'.join(aws_bucket.split("/")[3:])
        
        if sync_fits:
            files = glob.glob('{0}*'.format(label))
        else:
            files = glob.glob('{0}*png'.format(label))
            
        for file in files: 
            print('{0} -> {1}'.format(file, aws_bucket))
            bkt.upload_file(file, '{0}/{1}'.format(aws_path, file).replace('//','/'), ExtraArgs={'ACL': 'public-read'})
            
        #os.system('aws s3 sync --exclude "*" --include "{0}*" ./ {1} --acl public-read'.format(label, aws_bucket))
    
        #os.system("""echo "<pre>" > index.html; aws s3 ls AWSBUCKETX --human-readable | sort -k 1 -k 2 | grep -v index | awk '{printf("%s %s",$1, $2); printf(" %6s %s ", $3, $4); print "<a href="$5">"$5"</a>"}'>> index.html; aws s3 cp index.html AWSBUCKETX --acl public-read""".replace('AWSBUCKETX', aws_bucket))
    
    return has_filts
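
A minimal usage sketch for this version. The call below is hypothetical (illustrative target name and coordinates); it assumes the module-level RGB_PARAMS default referenced in the signature, and a local "Prep"-style directory containing a *visits.npy file, since an unrecognized master falls back to local files:

# Hypothetical example: drizzle 10" cutouts in two bands from local
# preprocessed exposures, without uploading anything to S3.
filts = drizzle_images(label='demo-target', ra=150.1, dec=2.2, size=10,
                       filters=['f160w', 'f814w'], master='local',
                       aws_bucket=None)
print('Drizzled filters:', filts)
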
Example #2
def drizzle_images(label='macs0647-jd1', ra=101.9822125, dec=70.24326667,
                   pixscale=0.1, size=10, wcs=None, pixfrac=0.33,
                   kernel='square', theta=0, half_optical_pixscale=True,
                   filters=['f160w', 'f140w', 'f125w', 'f105w', 'f110w',
                            'f098m', 'f850lp', 'f814w', 'f775w', 'f606w',
                            'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
                   skip=None, remove=True, rgb_params=RGB_PARAMS,
                   master='grizli-jan2019',
                   aws_bucket='s3://grizli/CutoutProducts/', scale_ab=21,
                   thumb_height=2.0, sync_fits=True, subtract_median=True,
                   include_saturated=True, include_ir_psf=False,
                   show_filters=['visb', 'visr', 'y', 'j', 'h'],
                   combine_similar_filters=True, single_output=True,
                   aws_prep_dir=None, make_segmentation_figure=False,
                   get_dict=False, dryrun=False, **kwargs):
    """
    label='cp561356'; ra=150.208875; dec=1.850241667; size=40; filters=['f160w','f814w', 'f140w','f125w','f105w','f606w','f475w']

    master: Named sets of precomputed lists of available exposures

        'cosmos': deprecated
        'grizli-cosmos-v2': all imaging covering the COSMOS field
        'candels-july2019': CANDELS fields other than COSMOS
        'grizli-v1': first processing of the Grizli CHArGE dataset
        'grizli-v1-19.12.04', 'grizli-v1-19.12.05': updated CHArGE fields

        Any other value falls back to local files (e.g., a "Prep" directory).

    """
    import glob
    import copy
    import inspect
    import os

    import numpy as np

    import astropy.io.fits as pyfits
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    from drizzlepac.adrizzle import do_driz

    import boto3

    from grizli import prep, utils
    from grizli.pipeline import auto_script

    # If requested, return the resolved call arguments as a dictionary
    # instead of running
    if get_dict:
        frame = inspect.currentframe()
        args = inspect.getargvalues(frame).locals

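        # Drop bookkeeping names and any argument that is a module, function,
        # or class, so only plain values remain in the returned dictionary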
        pop_args = ['get_dict', 'frame', 'kwargs']
        pop_classes = (np.__class__, do_driz.__class__, SkyCoord.__class__)

        for k in kwargs:
            args[k] = kwargs[k]

        for k in args:
            if isinstance(args[k], pop_classes):
                pop_args.append(k)

        for k in pop_args:
            if k in args:
                args.pop(k)

        return args

    # Boto objects
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')

    if isinstance(ra, str):
        coo = SkyCoord('{0} {1}'.format(ra, dec), unit=(u.hour, u.deg))
        ra, dec = coo.ra.value, coo.dec.value

    if label is None:
        try:
            import mastquery.utils
            label = mastquery.utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(1/15, 1), targstr='j{rah}{ram}{ras}{sign}{ded}{dem}{des}')
        except:
            label = 'grizli-cutout'

    #master = 'cosmos'
    #master = 'grizli-jan2019'

    if master == 'grizli-jan2019':
        parent = 's3://grizli/MosaicTools/'
        bkt = s3.Bucket('grizli')
    elif master == 'cosmos':
        parent = 's3://grizli-preprocess/CosmosMosaic/'
        bkt = s3.Bucket('grizli-preprocess')
    elif master == 'grizli-cosmos-v2':
        parent = 's3://grizli-cosmos-v2/Mosaics/'
        bkt = s3.Bucket('grizli-cosmos-v2')
    elif master == 'candels-july2019':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    elif master == 'grizli-v1-19.12.04':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    elif master == 'grizli-v1-19.12.05':
        parent = 's3://grizli-v1/Mosaics/'
        bkt = s3.Bucket('grizli-v1')
    else:
        # Run on local files, e.g., "Prep" directory
        parent = None
        bkt = None
        #remove = False

    # Download summary files from S3
    for ext in ['_visits.fits', '_visits.npy', '_filter_groups.npy'][-1:]:
        newfile = '{0}{1}'.format(master, ext)
        if (not os.path.exists(newfile)) & (parent is not None):

            s3_path = parent.split('/')[-2]
            s3_file = '{0}{1}'.format(master, ext)
            print('{0}{1}'.format(parent, s3_file))
            bkt.download_file(s3_path+'/'+s3_file, s3_file,
                              ExtraArgs={"RequestPayer": "requester"})

            #os.system('aws s3 cp {0}{1}{2} ./'.format(parent, master, ext))

    #tab = utils.read_catalog('{0}_visits.fits'.format(master))
    #all_visits = np.load('{0}_visits.npy'.format(master))[0]
    if parent is not None:
        groups = np.load('{0}_filter_groups.npy'.format(master), allow_pickle=True)[0]
    else:

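        # aws_prep_dir is expected to look like
        # 's3://{bucket}/Pipeline/{root}/Prep' (cf. s3_prep_path below)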
        if aws_prep_dir is not None:
            spl = aws_prep_dir.replace('s3://', '').split('/')
            prep_bucket = spl[0]
            prep_root = spl[2]

            prep_bkt = s3.Bucket(prep_bucket)

            s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
            s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)
            s3_file = '{0}_visits.npy'.format(prep_root)

            # Make output path Prep/../Thumbnails/
            if aws_bucket is not None:
                aws_bucket = ('s3://' +
                              s3_full_path.replace('/Prep/', '/Thumbnails/'))

            print('{0}{1}'.format(s3_prep_path, s3_file))
            if not os.path.exists(s3_file):
                prep_bkt.download_file(os.path.join(s3_prep_path, s3_file),
                            s3_file, ExtraArgs={"RequestPayer": "requester"})

            groups_files = glob.glob('{0}_filter_groups.npy'.format(prep_root))
            visit_query = prep_root+'_'
        else:
            groups_files = glob.glob('*filter_groups.npy')
            visit_query = '*'

        # Reformat local visits.npy into a groups file
        if (len(groups_files) == 0):

            visit_file = glob.glob(visit_query+'visits.npy')[0]

            visits, groups, info = np.load(visit_file, allow_pickle=True)
            visit_root = visit_file.split('_visits')[0]

            visit_filters = np.array([v['product'].split('-')[-1] for v in visits])
            groups = {}
            for filt in np.unique(visit_filters):
                groups[filt] = {}
                groups[filt]['filter'] = filt
                groups[filt]['files'] = []
                groups[filt]['footprints'] = []
                groups[filt]['awspath'] = []

                ix = np.where(visit_filters == filt)[0]
                for i in ix:
                    groups[filt]['files'].extend(visits[i]['files'])
                    groups[filt]['footprints'].extend(visits[i]['footprints'])

                Nf = len(groups[filt]['files'])
                print('{0:>6}: {1:>3} exposures'.format(filt, Nf))

                if aws_prep_dir is not None:
                    groups[filt]['awspath'] = [s3_full_path
                                               for file in range(Nf)]

            np.save('{0}_filter_groups.npy'.format(visit_root), [groups])

        else:
            print('Use groups file: {0}'.format(groups_files[0]))

            groups = np.load(groups_files[0], allow_pickle=True)[0]

    #filters = ['f160w','f814w', 'f110w', 'f098m', 'f140w','f125w','f105w','f606w', 'f475w']

    filt_dict = FilterDict()
    filt_dict.meta['label'] = label
    filt_dict.meta['ra'] = ra
    filt_dict.meta['dec'] = dec
    filt_dict.meta['size'] = size
    filt_dict.meta['master'] = master
    filt_dict.meta['parent'] = parent

    if filters is None:
        filters = list(groups.keys())

    has_filts = []
    lower_filters = [f.lower() for f in filters]
    for filt in lower_filters:
        if filt not in groups:
            continue

        visits = [copy.deepcopy(groups[filt])]
        #visits[0]['reference'] = 'CarlosGG/ak03_j1000p0228/Prep/ak03_j1000p0228-f160w_drz_sci.fits'

        visits[0]['product'] = label+'-'+filt

        if wcs is None:
            hdu = utils.make_wcsheader(ra=ra, dec=dec, size=size, pixscale=pixscale, get_hdu=True, theta=theta)

            h = hdu.header
        else:
            h = utils.to_header(wcs)

        if (filt[:2] in ['f0', 'f1', 'g1']) | (not half_optical_pixscale):
            #data = hdu.data
            pass
        else:
            for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
                h[k] *= 2

            h['CRPIX1'] -= 0.5
            h['CRPIX2'] -= 0.5

            for k in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
                if k in h:
                    h[k] /= 2

            #data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)

        #pyfits.PrimaryHDU(header=h, data=data).writeto('ref.fits', overwrite=True, output_verify='fix')
        #visits[0]['reference'] = 'ref.fits'

        print('\n\n###\nMake filter: {0}'.format(filt))

        if (filt.upper() in ['F105W', 'F110W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
            clean_i = False
        else:
            clean_i = remove

        status = utils.drizzle_from_visit(visits[0], h, pixfrac=pixfrac, kernel=kernel, clean=clean_i, include_saturated=include_saturated, skip=skip, dryrun=dryrun)

        if dryrun:
            filt_dict[filt] = status
            continue

        elif status is not None:
            sci, wht, outh, filt_dict[filt] = status

            if subtract_median:
                #med = np.median(sci[sci != 0])
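                # Estimate the background from the mode of the unique, finite,
                # nonzero pixel values, which is less biased by bright sources
                # than a simple median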
                try:
                    un_data = np.unique(sci[(sci != 0) & np.isfinite(sci)])
                    med = utils.mode_statistic(un_data)
                except:
                    med = 0.

                if not np.isfinite(med):
                    med = 0.

                print('\n\nMedian {0} = {1:.3f}\n\n'.format(filt, med))
                outh['IMGMED'] = (med, 'Median subtracted from the image')
            else:
                med = 0.
                outh['IMGMED'] = (0., 'Median subtracted from the image')

            pyfits.writeto('{0}-{1}_drz_sci.fits'.format(label, filt),
                           data=sci, header=outh, overwrite=True,
                           output_verify='fix')

            pyfits.writeto('{0}-{1}_drz_wht.fits'.format(label, filt),
                           data=wht, header=outh, overwrite=True,
                           output_verify='fix')

            has_filts.append(filt)

            if (filt.upper() in ['F105W', 'F110W', 'F125W', 'F140W', 'F160W']) & include_ir_psf:
                from grizli.galfit.psf import DrizzlePSF

                hdu = pyfits.open('{0}-{1}_drz_sci.fits'.format(label, filt),
                                  mode='update')

                flt_files = []  # visits[0]['files']
                for i in range(1, 10000):
                    key = 'FLT{0:05d}'.format(i)
                    if key not in hdu[0].header:
                        break

                    flt_files.append(hdu[0].header[key])

                try:

                    dp = DrizzlePSF(flt_files=flt_files, driz_hdu=hdu[0])

                    psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
                                 dec=dp.driz_wcs.wcs.crval[1],
                                 filter=filt.upper(),
                                 pixfrac=dp.driz_header['PIXFRAC'],
                                 kernel=dp.driz_header['KERNEL'],
                                 wcs_slice=dp.driz_wcs, get_extended=True,
                                 verbose=False, get_weight=False)

                    psf[1].header['EXTNAME'] = 'PSF'
                    #psf[1].header['EXTVER'] = filt
                    hdu.append(psf[1])
                    hdu.flush()

                except:
                    # PSF generation failed; continue without a PSF extension
                    pass

        if remove:
            os.system('rm *_fl*fits')

    # Dry run, just return dictionary of the found exposure files
    if dryrun:
        return filt_dict

    # Nothing found
    if len(has_filts) == 0:
        return []

    if combine_similar_filters:
        combine_filters(label=label)

    if rgb_params:
        #auto_script.field_rgb(root=label, HOME_PATH=None, filters=has_filts, **rgb_params)
        show_all_thumbnails(label=label, thumb_height=thumb_height, scale_ab=scale_ab, close=True, rgb_params=rgb_params, filters=show_filters)

    if (single_output != 0):
        # Concatenate into a single FITS file
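        # Each filter contributes paired SCI/WHT extensions, with EXTVER set
        # to the filter name, in {label}.thumb.fits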
        files = glob.glob('{0}-f*_dr[cz]_sci.fits'.format(label))
        files.sort()

        if combine_similar_filters:
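            # Products from combine_filters (e.g., 'visb', 'visr') don't
            # start with 'f', so match every other leading letter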
            comb_files = glob.glob('{0}-[a-eg-z]*_dr[cz]_sci.fits'.format(label))
            comb_files.sort()
            files += comb_files

        hdul = None
        for file in files:
            hdu_i = pyfits.open(file)
            hdu_i[0].header['EXTNAME'] = 'SCI'
            if 'NCOMBINE' in hdu_i[0].header:
                if hdu_i[0].header['NCOMBINE'] <= single_output:
                    continue

                filt_i = file.split('-')[-1].split('_dr')[0]
            else:
                filt_i = utils.get_hst_filter(hdu_i[0].header)

            for h in hdu_i:
                h.header['EXTVER'] = filt_i
                if hdul is None:
                    hdul = pyfits.HDUList([h])
                else:
                    hdul.append(h)

            print('Add to {0}.thumb.fits: {1}'.format(label, file))

            # Weight
            hdu_i = pyfits.open(file.replace('_sci', '_wht'))
            hdu_i[0].header['EXTNAME'] = 'WHT'
            for h in hdu_i:
                h.header['EXTVER'] = filt_i
                if hdul is None:
                    hdul = pyfits.HDUList([h])
                else:
                    hdul.append(h)

        hdul.writeto('{0}.thumb.fits'.format(label), overwrite=True,
                     output_verify='fix')

        for file in files:
            for f in [file, file.replace('_sci', '_wht')]:
                if os.path.exists(f):
                    print('Remove {0}'.format(f))
                    os.remove(f)

    # Segmentation figure
    thumb_file = '{0}.thumb.fits'.format(label)
    if (make_segmentation_figure) & (os.path.exists(thumb_file)) & (aws_prep_dir is not None):

        print('Make segmentation figure')

        # Fetch segmentation image and catalog.  Re-derive the Prep bucket
        # and root here, since they are only set above when `parent` is None
        spl = aws_prep_dir.replace('s3://', '').split('/')
        prep_bucket = spl[0]
        prep_root = spl[2]
        prep_bkt = s3.Bucket(prep_bucket)

        s3_prep_path = 'Pipeline/{0}/Prep/'.format(prep_root)
        s3_full_path = '{0}/{1}'.format(prep_bucket, s3_prep_path)

        has_seg_files = True
        seg_files = ['{0}-ir_seg.fits.gz'.format(prep_root),
                     '{0}_phot.fits'.format(prep_root)]

        for s3_file in seg_files:
            if not os.path.exists(s3_file):
                remote_file = os.path.join(s3_prep_path, s3_file)
                try:
                    print('Fetch {0}'.format(remote_file))
                    prep_bkt.download_file(remote_file, s3_file,
                                   ExtraArgs={"RequestPayer": "requester"})
                except:
                    has_seg_files = False
                    print('Make segmentation figure failed: {0}'.format(remote_file))
                    break

        if has_seg_files:
            s3_cat = utils.read_catalog(seg_files[1])
            segmentation_figure(label, s3_cat, seg_files[0])

    if aws_bucket:
        #aws_bucket = 's3://grizli-cosmos/CutoutProducts/'
        #aws_bucket = 's3://grizli/CutoutProducts/'

        s3 = boto3.resource('s3')
        s3_client = boto3.client('s3')
        bkt = s3.Bucket(aws_bucket.split("/")[2])
        aws_path = '/'.join(aws_bucket.split("/")[3:])

        if sync_fits:
            files = glob.glob('{0}*'.format(label))
        else:
            files = glob.glob('{0}*png'.format(label))

        for file in files:
            print('{0} -> {1}'.format(file, aws_bucket))
            bkt.upload_file(file, '{0}/{1}'.format(aws_path, file).replace('//', '/'), ExtraArgs={'ACL': 'public-read'})

        #os.system('aws s3 sync --exclude "*" --include "{0}*" ./ {1} --acl public-read'.format(label, aws_bucket))

        #os.system("""echo "<pre>" > index.html; aws s3 ls AWSBUCKETX --human-readable | sort -k 1 -k 2 | grep -v index | awk '{printf("%s %s",$1, $2); printf(" %6s %s ", $3, $4); print "<a href="$5">"$5"</a>"}'>> index.html; aws s3 cp index.html AWSBUCKETX --acl public-read""".replace('AWSBUCKETX', aws_bucket))

    return has_filts
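
This version adds get_dict and dryrun modes on top of Example #1. A hedged sketch of how they might be used together, assuming the same local setup as the Example #1 sketch (hypothetical target; neither call produces output images or uploads):

# Resolve the full argument dictionary for batch bookkeeping, then list the
# exposure files that would be used, without drizzling anything.
args = drizzle_images(label='demo-target', ra=150.1, dec=2.2, get_dict=True)
exposures = drizzle_images(label='demo-target', ra=150.1, dec=2.2,
                           filters=['f160w'], dryrun=True, aws_bucket=None)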