Example #1
 def makeCat(self, imgfile, instdet, weightfile=None, extref=False):
     """Makes a catalog of objects to be used for input to superalign and creates a DS9 region file of objects"""
     
     imgfile_cat = '%s.cat' % imgfile.replace('.fits', '')
     imgfile_reg = '%s.reg' % imgfile.replace('.fits', '')
 
     o_radec = []
     ext = 0
     objectlist = self.findSources('%s[%s]' % (imgfile, ext), imgfile_cat, instdet, weightfile, extref=extref)
     cleanobjectlist = self.removeCloseSources(objectlist)
     print('Found %s sources' % len(cleanobjectlist))
     wcs = HSTWCS(str(imgfile))
     for obj in cleanobjectlist:
         sky = wcs.all_pix2world(np.array([[obj.x, obj.y]]), 1)
         o_radec.append([obj.ra[0], obj.dec[0]])
         obj.ra = sky[0][0]
         obj.dec = sky[0][1]
     
     # Write out a ds9 region file of object selected for alignment
     regout = open(imgfile_reg, 'w')
     regout.write('global color=green font="helvetica 8 normal" edit=1 move=1 delete=1 include=1 fixed=0\nfk5\n')
     for i,rd in enumerate(o_radec):
         oid = i+1
         regout.write('circle(%s,%s,%s") # color=%s text={%s}\n' % (rd[0], rd[1], 0.5, 'red', oid))
     regout.close()
 
     # Now we need to write out the catalog in the reference image coords in arcseconds with respect to center of the image
     catout = open(imgfile_cat, 'w')
     for i,obj in enumerate(cleanobjectlist):
         oid = i+1
         catout.write('%i %.9f %.9f %.4f %.4f %.4f\n' % (oid, obj.ra, obj.dec, obj.x, obj.y, obj.mag))
     catout.close()
     return
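A minimal standalone sketch of the HSTWCS pixel/sky conversion pattern used above. The import path and the drizzled filename 'image_drz_sci.fits' are assumptions, since the examples in this listing do not show their imports:

# Hedged sketch: pixel <-> sky conversion with HSTWCS, one-based (FITS) origin
# as in makeCat above. 'image_drz_sci.fits' is a hypothetical drizzled image.
import numpy as np
from stwcs.wcsutil import HSTWCS  # assumed import location of HSTWCS

wcs = HSTWCS('image_drz_sci.fits')
pix = np.array([[100.0, 200.0], [150.0, 250.0]])
sky = wcs.all_pix2world(pix, 1)    # RA/Dec in degrees, includes distortion terms
back = wcs.all_world2pix(sky, 1)   # round trip back to pixel coordinates
print(sky)
print(back)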
Example #2
    def build(self, expnames):
        for exposure in expnames:
            blank = np.zeros(self.meta_wcs.array_shape, dtype=np.int16)
            exp = fits.open(exposure)
            sci_extns = wcs_functions.get_extns(exp)
            for sci in sci_extns:
                wcs = HSTWCS(exp, ext=sci)
                edges_x = [0]*wcs.naxis2 + [wcs.naxis1-1]*wcs.naxis2 + list(range(wcs.naxis1)) * 2
                edges_y = list(range(wcs.naxis2)) * 2 + [0]*wcs.naxis1 + [wcs.naxis2-1]*wcs.naxis1

                sky_edges = wcs.pixel_to_world_values(np.vstack([edges_x, edges_y]).T)
                meta_edges = self.meta_wcs.world_to_pixel_values(sky_edges).astype(np.int32)
                # Account for rounding problems with creating meta_wcs
                meta_edges[:,1] = np.clip(meta_edges[:,1], 0, self.meta_wcs.array_shape[0]-1)
                meta_edges[:,0] = np.clip(meta_edges[:,0], 0, self.meta_wcs.array_shape[1]-1)

                # apply meta_edges to blank mask
                # Use PIL to create mask
                parray = np.array(meta_edges.T)
                polygon = list(zip(parray[0], parray[1]))
                nx = self.meta_wcs.array_shape[1]
                ny = self.meta_wcs.array_shape[0]
                img = Image.new('L', (nx, ny) , 0)
                ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
                blank = np.array(img)

                self.total_mask += blank.astype(np.int16)
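A self-contained sketch of the PIL-based footprint rasterization used in this build() variant; the polygon vertices and mask shape below are made up for illustration:

# Draw a filled polygon into an 8-bit PIL image and read it back as a mask,
# the same trick used above to turn chip edge positions into a footprint mask.
import numpy as np
from PIL import Image, ImageDraw

ny, nx = 100, 120                                      # mask shape (rows, cols)
polygon = [(10, 10), (110, 15), (105, 90), (12, 85)]   # (x, y) vertices
img = Image.new('L', (nx, ny), 0)                      # PIL size is (width, height)
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
mask = np.array(img, dtype=np.int16)                   # 1 inside the outline, 0 outside
print(mask.sum(), 'pixels inside the footprint')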
Example #3
    def build(self, expnames):
        for exposure in expnames:
            blank = np.zeros(self.meta_wcs.array_shape, dtype=np.int16)
            exp = fits.open(exposure)
            sci_extns = wcs_functions.get_extns(exp)
            for sci in sci_extns:
                wcs = HSTWCS(exp, ext=sci)
                edges_x = [0] * wcs.naxis2 + [wcs.naxis1 - 1] * wcs.naxis2 + list(range(wcs.naxis1)) * 2
                edges_y = list(range(wcs.naxis2)) * 2 + [0] * wcs.naxis1 + [wcs.naxis2 - 1] * wcs.naxis1

                sky_edges = wcs.pixel_to_world_values(
                    np.vstack([edges_x, edges_y]).T)
                meta_edges = self.meta_wcs.world_to_pixel_values(
                    sky_edges).astype(np.int32)
                blank[meta_edges[:, 1], meta_edges[:, 0]] = 1

                # Fill in outline of each chip
                blank = morphology.binary_dilation(blank,
                                                   structure=NDIMAGE_STRUCT2)
                blank = morphology.binary_fill_holes(blank)
                blank = morphology.binary_erosion(blank,
                                                  structure=NDIMAGE_STRUCT2)

            self.total_mask += blank.astype(np.int16)
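A self-contained sketch of the morphology-based alternative used in this variant: mark the chip outline, then dilate, fill holes, and erode so the interior joins the mask. The 3x3 structuring element is an assumed stand-in for NDIMAGE_STRUCT2, which is not shown in the listing:

# Fill a rectangular outline with scipy.ndimage, as in the build() above.
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion, binary_fill_holes

struct2 = np.ones((3, 3), dtype=bool)   # assumed stand-in for NDIMAGE_STRUCT2
mask = np.zeros((100, 120), dtype=np.int16)
mask[20, 20:101] = 1                    # top edge of a rectangular "chip"
mask[80, 20:101] = 1                    # bottom edge
mask[20:81, 20] = 1                     # left edge
mask[20:81, 100] = 1                    # right edge
mask = binary_dilation(mask, structure=struct2)
mask = binary_fill_holes(mask)
mask = binary_erosion(mask, structure=struct2)
print(int(mask.sum()), 'pixels inside the filled outline')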
Example #4
def stars2cat(drzfile, refwcs):
    """
     Makes catalog file from a superalign .stars file
    """
    wcs = HSTWCS(fits.open(drzfile))
    starsfile = drzfile.replace('.fits', '_sa.cat.stars')
    catfile = '%s.cat' % starsfile
    rcat = np.genfromtxt(starsfile, usecols=(1, 2))
    rrdcat = wcs.all_pix2world(
        (rcat / wcs.pscale) + [wcs.naxis1 / 2, wcs.naxis2 / 2], 1).tolist()
    xy = refwcs.all_world2pix(rrdcat, 1).tolist()
    with open(catfile, 'w') as catout:
        for i, rd in enumerate(rrdcat):
            catout.write('%d %.8f %.8f %.3f %.3f %d\n' %
                         (i, rd[0], rd[1], xy[i][0], xy[i][1], i))
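A sketch of the coordinate convention the superalign catalogs use above: positions are stored as arcsecond offsets from the image center, so dividing by the pixel scale and adding the center pixel recovers pixel coordinates for HSTWCS. The filename and import location are assumptions:

# Convert superalign-style arcsecond offsets back to RA/Dec, as in stars2cat.
import numpy as np
from astropy.io import fits
from stwcs.wcsutil import HSTWCS  # assumed import location

wcs = HSTWCS(fits.open('visit01_drz_sci.fits'))        # hypothetical image
offsets_arcsec = np.array([[1.25, -0.40]])             # (dx, dy) in arcsec
pix = offsets_arcsec / wcs.pscale + [wcs.naxis1 / 2, wcs.naxis2 / 2]
print(wcs.all_pix2world(pix, 1))                       # [[ra, dec]] in degrees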
Example #5
 def makeSACat(self, imgfile, extref=False):
     """
     Makes a catalog of objects to be used for input to superalign from an external reference catalog.
     Catalog should be of form: ID RA(deg) Dec(deg) Mag
     Output appends _sa
     """
     if extref:
         wcs = self.refwcs
     else:
         wcs = HSTWCS(imgfile)
     cat = imgfile.replace('.fits', '.cat')
     outcat = imgfile.replace('.fits', '_sa.cat')
     data = ascii.read(cat, names=['id', 'ra', 'dec', 'x', 'y', 'mag'])
     arcs = (wcs.all_world2pix(list(zip(data['ra'], data['dec'])), 1) - [wcs.naxis1/2, wcs.naxis2/2])*wcs.pscale
     ascii.write([data['id'], arcs[:,0], arcs[:,1], data['mag']], outcat, format='no_header')
     return
Example #6
    def _set_coefficients(self):
        """Extracts the drizzle coefficients."""
        try:
            self.imwcs = HSTWCS(self.image, ext=self.detector)
            #self.xcoeffs, self.ycoeffs = coeff_converter.sip2idc(self.imwcs)
        except ValueError:
            raise aXeError(
                "Could not determine distortion coefficients from header.")

        # dictionary with the fixed names for the orders
        fixed_orders = {
            1: 'constant',
            2: 'linear',
            3: 'quadratic',
            4: 'cubic',
            5: 'quintic'
        }
        self.xcoeffs = np.array([[0.0], [1.0], [0.0], [0.0], [0.0], [0.0],
                                 [0.0], [0.0], [0.0], [0.0]])
        self.ycoeffs = np.array([[0.0], [0.0], [1.0], [0.0], [0.0], [0.0],
                                 [0.0], [0.0], [0.0], [0.0]])

        # return the order
        # for HST the a and b orders should always match
        # sip.ap_order is the inverse
        # if (self.imwcs.sip.a_order != self.imwcs.sip.b_order):
        #     raise aXeError("SIP a and b orders don't match!")
        # self.order = fixed_orders[self.imwcs.sip.a_order]
        try:
            self.order = fixed_orders[len(self.xcoeffs) // 2]
        except KeyError:
            raise aXeError(
                f'Coefficient order not available: {len(self.xcoeffs) // 2} in {fixed_orders}'
            )
Example #7
def applyShiftsSA(visit):
    sa_out = '%s_simplematch.out' % visit
    drzs = {}
    print('Reading %s...' % sa_out)
    for i in [j.split() for j in open(sa_out).readlines()]:
        drz = i[0].replace('_sa.cat', '.fits')
        drzs[drz] = [float(i[1]), float(i[2]), float(i[3])]
    print("Applying shifts...")
    with open('sa_shifts.txt', 'a') as sas:
        for d, s in drzs.items():
            if os.path.exists(d):
                if d[:6] == visit:
                    if s:
                        dx, dy, dt = s
                        wcs = HSTWCS(fits.open(d))
                        dxp = round(dx / wcs.pscale, 3)
                        dyp = round(dy / wcs.pscale, 3)
                        dtp = round(dt, 3)
                        offsets = [d, dxp, dyp, dtp]
                        sas.write('%s %.3f %.3f %.3f\n' % (d, dxp, dyp, dtp))
                        print(d, dxp, dyp, dtp)
                        updatehdr.updatewcs_with_shift(d,
                                                       d,
                                                       wcsname='DRZWCS',
                                                       xsh=dxp,
                                                       ysh=dyp,
                                                       rot=dtp,
                                                       scale=1.0,
                                                       force=True)
Example #8
def match_to_gaia(imcat, refcat, product, output, searchrad=5.0):
    """Create a catalog with sources matched to GAIA sources
    
    Parameters
    ----------
    imcat : str or obj
        Filename or astropy.Table of source catalog written out as ECSV file
        
    refcat : str
        Filename of GAIA catalog files written out as ECSV file
        
    product : str
        Filename of drizzled product used to derive the source catalog
        
    output : str
        Rootname for matched catalog file to be written as an ECSV file 
    
    """
    if isinstance(imcat, str):
        imtab = Table.read(imcat, format='ascii.ecsv')
        imtab.rename_column('X-Center', 'x')
        imtab.rename_column('Y-Center', 'y')
    else:
        imtab = imcat
        if 'X-Center' in imtab.colnames:
            imtab.rename_column('X-Center', 'x')
            imtab.rename_column('Y-Center', 'y')
            
    
    reftab = Table.read(refcat, format='ascii.ecsv')
    
    # define WCS for matching
    tpwcs = tweakwcs.FITSWCS(HSTWCS(product, ext=1))
    
    # define matching parameters
    tpmatch = tweakwcs.TPMatch(searchrad=searchrad)
    
    # perform match
    ref_indx, im_indx = tpmatch(reftab, imtab, tpwcs)
    print('Found {} matches'.format(len(ref_indx)))
    
    # Obtain tangent plane positions for both image sources and reference sources
    im_x, im_y = tpwcs.det_to_tanp(imtab['x'][im_indx], imtab['y'][im_indx])
    ref_x, ref_y = tpwcs.world_to_tanp(reftab['RA'][ref_indx], reftab['DEC'][ref_indx])
    if 'RA' not in imtab.colnames:
        im_ra, im_dec = tpwcs.det_to_world(imtab['x'][im_indx], imtab['y'][im_indx])
    else:
        im_ra = imtab['RA'][im_indx]
        im_dec = imtab['DEC'][im_indx]
        

    # Compile match table
    match_tab = Table(data=[im_x, im_y, im_ra, im_dec, 
                            ref_x, ref_y, 
                            reftab['RA'][ref_indx], reftab['DEC'][ref_indx]],
                      names=['img_x','img_y', 'img_RA', 'img_DEC', 
                             'ref_x', 'ref_y', 'ref_RA', 'ref_DEC'])
    if not output.endswith('.ecsv'):
        output = '{}.ecsv'.format(output)                             
    match_tab.write(output, format='ascii.ecsv')
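A hedged usage sketch for match_to_gaia; the catalog and product filenames below are hypothetical placeholders:

# Match an image source catalog against a GAIA reference catalog and write
# the matched table; output gets '.ecsv' appended if missing.
match_to_gaia(imcat='j8xi01010_drc_point-cat.ecsv',   # hypothetical ECSV source catalog
              refcat='gaia_refcat.ecsv',              # hypothetical GAIA ECSV catalog
              product='j8xi01010_drc.fits',           # hypothetical drizzled product
              output='j8xi01010_gaia_matches',
              searchrad=5.0)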
Example #9
def generate_source_catalogs(imglist, **pars):
    """Generates a dictionary of source catalogs keyed by image name.

    Parameters
    ----------
    imglist : list
        List of one or more calibrated fits images that will be used for source detection.

    Returns
    -------
    sourcecatalogdict : dictionary
        a dictionary (keyed by image name) of two-element dictionaries which in turn contain 1) a dictionary of the
        detector-specific processing parameters and 2) an astropy table of position and photometry information of all
        detected sources
    """
    output = pars.get('output', False)
    sourcecatalogdict = {}
    for imgname in imglist:
        print("Image name: ", imgname)

        sourcecatalogdict[imgname] = {}

        # open image
        imghdu = fits.open(imgname)
        imgprimaryheader = imghdu[0].header
        instrument = imgprimaryheader['INSTRUME'].lower()
        detector = imgprimaryheader['DETECTOR'].lower()

        # get instrument/detector-specific image alignment parameters
        if instrument in detector_specific_params.keys():
            if detector in detector_specific_params[instrument].keys():
                detector_pars = detector_specific_params[instrument][detector]
                # to allow generate_source_catalog to get detector specific parameters
                detector_pars.update(pars)
                sourcecatalogdict[imgname]["params"] = detector_pars
            else:
                sys.exit("ERROR! Unrecognized detector '{}'. Exiting...".format(detector))
        else:
            sys.exit("ERROR! Unrecognized instrument '{}'. Exiting...".format(instrument))

        # Identify sources in image, convert coords from chip x, y form to reference WCS sky RA, Dec form.
        imgwcs = HSTWCS(imghdu, 1)
        fwhmpsf_pix = sourcecatalogdict[imgname]["params"]['fwhmpsf'] / imgwcs.pscale  # Convert fwhmpsf from arcsec to pixels

        sourcecatalogdict[imgname]["catalog_table"] = amutils.generate_source_catalog(imghdu, fwhm=fwhmpsf_pix, **detector_pars)

        # write out coord lists to files for diagnostic purposes. Protip: To display the sources in these files in DS9,
        # set the "Coordinate System" option to "Physical" when loading the region file.
        imgroot = os.path.basename(imgname).split('_')[0]
        numSci = amutils.countExtn(imghdu)
        # Allow user to decide when and how to write out catalogs to files
        if output:
            for chip in range(1,numSci+1):
                regfilename = "{}_sci{}_src.reg".format(imgroot, chip)
                out_table = Table(sourcecatalogdict[imgname]["catalog_table"][chip])
                out_table.write(regfilename, include_names=["xcentroid", "ycentroid"], format="ascii.fast_commented_header")
                print("Wrote region file {}\n".format(regfilename))
        imghdu.close()
    return sourcecatalogdict
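A hedged usage sketch for generate_source_catalogs; the exposure names are placeholders, and output=True requests the per-chip region files described above:

# Build source catalogs for two hypothetical calibrated exposures.
catalogs = generate_source_catalogs(['j8xi01e6q_flc.fits', 'j8xi01e7q_flc.fits'],
                                    output=True)
for name, info in catalogs.items():
    print(name, 'chips catalogued:', len(info['catalog_table']))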
Example #10
def test_prihdu_with_extver_no_extname():
    hdulist = fits.HDUList([
        fits.PrimaryHDU(header=fits.Header([('extver', 7)])),
        fits.ImageHDU(header=fits.Header([('time', 6)]))
    ])
    extname = HSTWCS(hdulist).extname
    assert extname == ('PRIMARY', 7)
    assert hdulist[extname] is hdulist[0]
Example #11
def makeSAin(visit, imgs, refwcs, refcat_sa):
    sa_file = '%s_superalign.in' % visit
    print('Creating %s' % sa_file)
    sa_in = open(sa_file, 'w')
    sa_in.write('%s 1\n' % str(len(imgs) + 1))
    sa_in.write('%s 0.000 0.000 0.000\n' % refcat_sa)
    for drz in imgs:
        f = os.path.basename(drz)
        wcs = HSTWCS(fits.open(drz))
        sky = wcs.all_pix2world([[wcs.naxis1 / 2, wcs.naxis2 / 2]], 1)
        arc = (refwcs.all_world2pix(sky, 1) -
               [refwcs.naxis1 / 2, refwcs.naxis2 / 2]) * refwcs.pscale
        rot = round(refwcs.orientat - wcs.orientat, 3)
        sa_in.write('%s %s %s %s\n' %
                    (f.replace('.fits', '_sa.cat'), round(
                        arc[0][0], 3), round(arc[0][1], 3), rot))
    sa_in.close()
Example #12
 def makeSACat(self, imgfile, extref=False):
     """
     Makes a catalog of objects to be used for input to superalign from an external reference catalog.
     Catalog should be of form: ID RA(deg) Dec(deg) Mag
     Output appends _sa
     """
     if extref:
         wcs = self.refwcs
     else:
         wcs = HSTWCS(imgfile)
     cat = imgfile.replace('.fits', '.cat')
     outcat = imgfile.replace('.fits', '_sa.cat')
     data = ascii.read(cat, names=['id', 'ra', 'dec', 'x', 'y', 'mag'])
     arcs = (wcs.all_world2pix(list(zip(data['ra'], data['dec'])), 1) -
             [wcs.naxis1 / 2, wcs.naxis2 / 2]) * wcs.pscale
     ascii.write([data['id'], arcs[:, 0], arcs[:, 1], data['mag']],
                 outcat,
                 format='no_header',
                 overwrite=True)
     return
Example #13
    def makeCat(self, imgfile, instdet, weightfile=None, extref=False):
        """Makes a catalog of objects to be used for input to superalign and creates a DS9 region file of objects"""

        imgfile_cat = '%s_all.cat' % imgfile.replace('.fits', '')
        imgfile_reg = '%s_all.reg' % imgfile.replace('.fits', '')

        o_radec = []
        ext = 0
        objectlist = self.findSources('%s[%s]' % (imgfile, ext),
                                      imgfile_cat,
                                      instdet,
                                      weightfile,
                                      extref=extref)
        cleanobjectlist = self.removeCloseSources(objectlist)
        print('Found %s sources' % len(cleanobjectlist))
        wcs = HSTWCS(str(imgfile))
        for obj in cleanobjectlist:
            sky = wcs.all_pix2world(np.array([[obj.x, obj.y]]), 1)
            o_radec.append([obj.ra[0], obj.dec[0]])
            obj.ra = sky[0][0]
            obj.dec = sky[0][1]

        # Write out a ds9 region file of object selected for alignment
        regout = open(imgfile_reg, 'w')
        regout.write(
            'global color=green font="helvetica 8 normal" edit=1 move=1 delete=1 include=1 fixed=0\nfk5\n'
        )
        for i, rd in enumerate(o_radec):
            oid = i + 1
            regout.write('circle(%s,%s,%s") # color=%s text={%s}\n' %
                         (rd[0], rd[1], 0.5, 'red', oid))
        regout.close()

        # Now we need to write out the catalog in the reference image coords in arcseconds with respect to center of the image
        catout = open(imgfile_cat, 'w')
        for i, obj in enumerate(cleanobjectlist):
            oid = i + 1
            catout.write('%i %.9f %.9f %.4f %.4f %.4f\n' %
                         (oid, obj.ra, obj.dec, obj.x, obj.y, obj.mag))
        catout.close()
        return
Example #14
    def _get_sci_group(i, index):
        d = Data("%s_%i" % (label, index))
        d.coords = coordinates_from_wcs(HSTWCS(hdulist, i))

        index = index + 1
        d.add_component(hdulist[i].data, hdulist[i].name)
        for h in hdulist[i:]:
            if h.name == 'SCI':
                break  # new science grp
            if h.name not in ['ERR', 'DQ']:
                continue
            d.add_component(h.data, h.name)
        return d
Example #15
def getFootprint(fitsfile):
    """Get footprint of image"""
    instdet = getInstDet(fitsfile)
    fin = fits.open(fitsfile)
    exts = sciexts[instdet]
    wcs = HSTWCS(fin, exts[0])
    points = wcs.calc_footprint()
    if len(exts) > 1:
        wcs = HSTWCS(fin, exts[1])
        points = np.vstack((points, wcs.calc_footprint()))
    else:
        points = wcs.calc_footprint()
    hull = ConvexHull(points)
    vertices = np.take(points, hull.vertices, axis=0).flatten().tolist()
    fltr = getFilter(fitsfile).upper()
    dsn = fitsfile[:9]
    with open(fitsfile.replace('.fits', '_footprint.reg'), 'w') as reg:
        reg.write('global color=green font="helvetica 8 normal" edit=1 move=1 delete=1 include=1 fixed=0\nfk5\n')
        if fltr in filters.keys():
            c = filters[fltr]
        else:
            c = 'white'
        reg.write('polygon(%s) # color=%s text={%s}\n' % (str(vertices)[1:-1], c, dsn))
Example #16
def refineShiftMCMC(drzfile):
    """
     Refine shifts using MCMC
    """
    dsn = drzfile[:9]
    wcs = HSTWCS(fits.open(drzfile))
    wcsn = fits.getval(drzfile, 'wcsname')
    try:
        refcat = np.loadtxt('%s_drz_sci_sa_ref_match.cat' % dsn,
                            usecols=(1, 2))
        imgcat = np.loadtxt('%s_drz_sci_sa_match.cat' % dsn, usecols=(1, 2))

        refcatw = wcs.all_world2pix(refcat, 1)
        imgcatw = wcs.all_world2pix(imgcat, 1)

        ox, oy = wcs.wcs.crpix.tolist()

        offset, err = mcmcShifts.findOffsetMCMC(imgcatw,
                                                refcatw,
                                                maxShift=(10, 10, 0.3),
                                                rotOrigin=(ox, oy),
                                                precision=0.01,
                                                visualize=False)
        print(drzfile, offset, err)

        dxp, dyp, dtp = offset
        updatehdr.updatewcs_with_shift(drzfile,
                                       drzfile,
                                       wcsname='DRZWCS',
                                       xsh=dxp,
                                       ysh=dyp,
                                       rot=dtp,
                                       scale=1.0,
                                       force=True)
        return offset
    except UserWarning:
        pass
Example #17
def cli(ctx, itype, otype, ptask):
    """
    Drizzles individual images to be used for alignment
    """
    dsn = ctx.dataset_name
    ctx.log('Running task %s for dataset %s', task, dsn)
    procdir = os.path.join(ctx.rundir, dsn)
    os.chdir(procdir)
    cfgf = '%s_cfg.json' % dsn
    cfg = hutils.rConfig(cfgf)
    tcfg = cfg['tasks'][task] = {}
    tcfg['ptask'] = ptask
    tcfg['itype'] = itype
    tcfg['otype'] = otype
    tcfg['stime'] = ctx.dt()
    tcfg['completed'] = False
    if ctx.refimg:
        refimg = str(ctx.refimg)
        refwcs = HSTWCS(refimg)
        cfg['refimg'] = refimg
        pscale = refwcs.pscale
        orientat = refwcs.orientat
    else:
        # use native pixel scale
        pscale = None
        orientat = 0
    images = hutils.imgList(cfg['images'])
    infiles = [str('%s%s' % (i, itype)) for i in images]

    n = len(infiles)
    with click.progressbar(infiles,
                           label='Generating single drizzled images') as pbar:
        for i, f in enumerate(pbar):
            ctx.vlog('\n\nDrizzling image %s - %s of %s', f, i + 1, n)
            try:
                drizzle_image.drzImage(f, pscale, orientat)
            except Exception as e:
                hutils.wConfig(cfg, cfgf)
                print(e)
                raise

    tcfg['etime'] = ctx.dt()
    tcfg['completed'] = True
    ctx.vlog('Writing configuration file %s for %s task', cfgf, task)
    hutils.wConfig(cfg, cfgf)
Example #18
    def __init__(self, filename):
        if isinstance(filename, str):
            self.imghdu = fits.open(filename)
            self.imgname = filename
        else:
            self.imghdu = filename
            self.imgname = filename.filename()

        if 'rootname' in self.imghdu[0].header:
            self.rootname = self.imghdu[0].header['rootname']
        else:
            self.rootname = self.imgname.replace('.fits', '')

        # Fits file read
        self.num_sci = amutils.countExtn(self.imghdu)
        self.num_wht = amutils.countExtn(self.imghdu, extname='WHT')
        self.data = np.concatenate(
            [self.imghdu[('SCI', i + 1)].data for i in range(self.num_sci)])
        if not self.num_wht:
            self.dqmask = self.build_dqmask()
        else:
            self.dqmask = None
        self.wht_image = self.build_wht_image()

        # Get the HSTWCS object from the first extension
        self.imgwcs = HSTWCS(self.imghdu, 1)
        self.pscale = self.imgwcs.pscale

        self._wht_image = None
        self.bkg = {}
        self.bkg_dao_rms = {}
        self.bkg_rms_mean = {}
        self.threshold = {}

        self.kernel = None
        self.kernel_fwhm = None
        self.kernel_psf = False
        self.fwhmpsf = None

        self.catalog_table = {}
Example #19
def restoreWCSdrz(img, ext):
    wcs = HSTWCS(img, ext=ext, wcskey='A')
    print('Removing any previous alternative WCS for image %s[%s]' %
          (img, ext))
    names = wcsnames(img, ext)
    for k, n in names.items():
        if k not in [' ', 'O']:
            deleteWCS(img, ext, wcskey=k, wcsname=n)

    with fits.open(img, mode='update') as hdu:
        hdu[ext].header['CD1_1'] = wcs.wcs.cd[0][0]
        hdu[ext].header['CD1_2'] = wcs.wcs.cd[0][1]
        hdu[ext].header['CD2_1'] = wcs.wcs.cd[1][0]
        hdu[ext].header['CD2_2'] = wcs.wcs.cd[1][1]
        hdu[ext].header['CRVAL1'] = wcs.wcs.crval[0]
        hdu[ext].header['CRVAL2'] = wcs.wcs.crval[1]
        hdu[ext].header['CRPIX1'] = wcs.wcs.crpix[0]
        hdu[ext].header['CRPIX2'] = wcs.wcs.crpix[1]
        hdu[ext].header['ORIENTAT'] = 0.0
        hdu[ext].header['WCSNAME'] = 'DRZWCS'
        del hdu[ext].header['LATPOLE']
        del hdu[ext].header['LONPOLE']
Example #20
    def __init__(self, filename):
        if isinstance(filename, str):
            self.imghdu = fits.open(filename)
            self.imgname = filename
        else:
            self.imghdu = filename
            self.imgname = filename.filename()

        # Get header information to annotate the output catalogs
        if "total" in self.imgname:
            self.ghd_product = "tdp"
        else:
            self.ghd_product = "fdp"

        # Fits file read
        self.data = self.imghdu[('SCI', 1)].data
        self.wht_image = self.imghdu['WHT'].data.copy()

        # Get the HSTWCS object from the first extension
        self.imgwcs = HSTWCS(self.imghdu, 1)

        self.keyword_dict = self._get_header_data()

        self.bkg = None
Example #21
def getFootprint(fitsfile):
    """Get footprint of image"""
    instdet = getInstDet(fitsfile)
    fin = fits.open(fitsfile)
    exts = sciexts[instdet]
    wcs = HSTWCS(fin, exts[0])
    points = wcs.calc_footprint()
    if len(exts) > 1:
        wcs = HSTWCS(fin, exts[1])
        points = np.vstack((points, wcs.calc_footprint()))
    else:
        points = wcs.calc_footprint()
    hull = ConvexHull(points)
    vertices = np.take(points, hull.vertices, axis=0).flatten().tolist()
    fltr = getFilter(fitsfile).upper()
    dsn = fitsfile[:9]
    with open(fitsfile.replace('.fits', '_footprint.reg'), 'w') as reg:
        reg.write('global color=green font="helvetica 8 normal" edit=1 move=1 delete=1 include=1 fixed=0\nfk5\n')
        if fltr in list(filters.keys()):
            c = filters[fltr]
        else:
            c = 'white'
        reg.write('polygon(%s) # color=%s text={%s}\n' % (str(vertices)[1:-1], c, dsn))
Example #22
    def build(self, expnames, scale=False, scale_kw='EXPTIME'):
        """ Create mask showing where all input exposures overlap the footprint's WCS

        Notes
        -----
        This method populates the following attributes (initialized as all zeros):
          - total_mask : shows number of chips per pixel
          - scaled_mask : if computed, shows (by default) exposure time per pixel

        Parameters
        -----------
        expnames : list
            List of filenames for all input exposures that overlap the SkyFootprint WCS

        scale : bool, optional
            If specified, scale each chip by the value of the `scale_kw` keyword from the input exposure.

        scale_kw : str, optional
            If `scale` is `True`, get the scaling value from this keyword.  This keyword is assumed to be
            in the PRIMARY header.

        """
        for exposure in expnames:
            blank = np.zeros(self.meta_wcs.array_shape, dtype=np.int16)
            exp = fits.open(exposure)

            sci_extns = wcs_functions.get_extns(exp)
            for sci in sci_extns:
                wcs = HSTWCS(exp, ext=sci)
                edges_x = [0] * wcs.naxis2 + [wcs.naxis1 - 1] * wcs.naxis2 + list(range(wcs.naxis1)) * 2
                edges_y = list(range(wcs.naxis2)) * 2 + [0] * wcs.naxis1 + [wcs.naxis2 - 1] * wcs.naxis1

                sky_edges = wcs.pixel_to_world_values(
                    np.vstack([edges_x, edges_y]).T)
                meta_edges = self.meta_wcs.world_to_pixel_values(
                    sky_edges).astype(np.int32)
                # Account for rounding problems with creating meta_wcs
                meta_edges[:, 1] = np.clip(meta_edges[:, 1], 0,
                                           self.meta_wcs.array_shape[0] - 1)
                meta_edges[:, 0] = np.clip(meta_edges[:, 0], 0,
                                           self.meta_wcs.array_shape[1] - 1)

                # apply meta_edges to blank mask
                # Use PIL to create mask
                parray = np.array(meta_edges.T)
                polygon = list(zip(parray[0], parray[1]))
                nx = self.meta_wcs.array_shape[1]
                ny = self.meta_wcs.array_shape[0]
                img = Image.new('L', (nx, ny), 0)
                ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
                blank = np.array(img)

                self.total_mask += blank.astype(np.int16)

                # Compute scaled mask if specified...
                if scale:
                    scale_val = fits.getval(exposure, scale_kw)
                    self.scaled_mask += blank.astype(np.int16) * scale_val
Example #23
def cli(ctx, itype, ofile, ptask):
    """
    Runs superalign on catalogs for alignment
    """
    dsn = ctx.dataset_name
    ctx.log('Running task %s for dataset %s', task, dsn)
    procdir = os.path.join(ctx.rundir, dsn)
    os.chdir(procdir)
    cfgf = '%s_cfg.json' % dsn
    cfg = hutils.rConfig(cfgf)
    tcfg = cfg['tasks'][task] = {}
    tcfg['ptask'] = ptask
    tcfg['itype'] = itype
    tcfg['ofile'] = cfg['sfile'] = ofile
    tcfg['stime'] = ctx.dt()
    tcfg['completed'] = False

    images = hutils.imgList(cfg['images'])
    infiles = [str('%s%s' % (i, itype)) for i in images]
    refimg = cfg['refimg']
    refcat = cfg['refcat']
    refcat_sa = cfg['refcat_sa']
    refwcs = HSTWCS(fits.open(refimg))
    mkcat = make_catalog.MakeCat(refimg)

    sa_hlf = find_executable('superalign_hlfred')
    if not sa_hlf:
        ctx.elog(
            'Unable to find "superalign_hlfred" executable. Make sure it is in your PATH.'
        )
        sys.exit(1)

    ctx.vlog('Grouping images by visit for alignment')
    visits = set([i[:6] for i in infiles])
    vdata = {}
    for v in visits:
        vdata[v] = []
        for e in infiles:
            if e[:6] == v:
                vdata[v].append(e)

    # Run superalign on all visits
    ctx.vlog('Generating the superalign input')
    with open('superalign_failed_visits.txt', 'w') as sfv:
        for visit, inf in vdata.items():
            super_align.makeSAin(visit, inf, refwcs, refcat_sa)
            try:
                sa_cmd = '%s %s_superalign.in %s_sources.cat %s_offsets.cat' % (
                    sa_hlf, visit, visit, visit)
                ctx.vlog('Running: %s', sa_cmd)
                ecode = super_align.runSuperAlign(sa_cmd)
                if ecode != 0:
                    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    print('Superalign FAILED on %s' % visit)
                    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    sfv.write('%s\n' % visit)
                else:
                    print('----------------------------------')
                    print('Completed superalign on %s' % visit)
                    print('----------------------------------')
                super_align.makeSourceCat(visit, refwcs)
                ctx.vlog('Running simplematch')
                super_align.runSimpleMatch(visit)
                ctx.vlog('Running applyshifts')
                super_align.applyShiftsSA(visit)

            except Exception as e:
                sfv.close()
                hutils.wConfig(cfg, cfgf)
                print(e)
                raise

    # Refine the shifts with MCMC
    ctx.vlog('Refining shifts...')
    with open('mcmc_shifts.txt', 'a') as ms:
        for drz in infiles:
            super_align.stars2cat(drz, refwcs)
        for visit, inf in vdata.items():
            ctx.vlog('Generating matched catalogs for visit %s', visit)
            for c1 in glob.glob('%s???_drz_sci_sa.cat.stars.cat' % visit):
                c2 = refcat
                c1m = c1.replace('.cat.stars.cat', '_match.cat')
                c2m = c1.replace('.cat.stars.cat', '_ref_match.cat')
                super_align.nn_match(c1, c2, refwcs, c1m, c2m)
        for drz in infiles:
            with fits.open(drz) as hdu:
                wn = hdu[0].header['wcsname']
            if wn == 'DRZWCS_1':
                ctx.vlog('Refining shifts for %s' % drz)
                offset = super_align.refineShiftMCMC(drz)
                dxp, dyp, dtp = offset
                ms.write('%s %.3f %.3f %.3f\n' % (drz, dxp, dyp, dtp))
                ctx.vlog('Generating catalogs for checking alignment for %s',
                         drz)
                whtf = drz.replace('sci', 'wht')
                instdet = hutils.getInstDet(drz)
                micat = drz.replace('.fits', '_all.cat')
                os.rename(micat, micat.replace('_all.cat', '_all_orig.cat'))
                omrcat = drz.replace('.fits', '_ref.cat')
                os.rename(omrcat, omrcat.replace('_ref.cat', '_ref_orig.cat'))
                os.rename(drz.replace('.fits', '.cat'),
                          drz.replace('.fits', '_orig.cat'))
                mkcat.makeCat(drz, instdet, weightfile=whtf)
                omicat = drz.replace('.fits', '.cat')
                # create new catalogs with only matching pairs
                hutils.nn_match_radec(micat, refcat, omicat, omrcat)

    ctx.vlog('Calculating total shifts from shift files...')

    sasd = {}
    with open('sa_shifts.txt') as sas:
        sasl = [i.split() for i in sas.read().splitlines()]
    for sasi in sasl:
        ffn, sdx, sdy, sdt = sasi
        sasd[ffn] = [float(sdx), float(sdy), float(sdt)]

    mcsd = {}
    with open('mcmc_shifts.txt') as mcs:
        mcsl = [i.split() for i in mcs.read().splitlines()]
    for mcsi in mcsl:
        ffn, mdx, mdy, mdt = mcsi
        mcsd[ffn] = [float(mdx), float(mdy), float(mdt)]

    with open('{}_total_shifts.txt'.format(dsn), 'w') as tsf:
        for ff, sas in sasd.items():
            mcs = mcsd[ff]
            tsh = [sum(i) for i in zip(sas, mcs)]
            ash = sas + mcs + tsh
            tshifts = [ff] + ash
            tsf.write(
                '{} {: .3f} {: .3f} {: .3f} {: .3f} {: .3f} {: .3f} {: .3f} {: .3f} {: .3f}\n'
                .format(*tshifts))

    tcfg['etime'] = ctx.dt()
    tcfg['completed'] = True
    ctx.vlog('Writing configuration file %s for %s task', cfgf, task)
    hutils.wConfig(cfg, cfgf)
Example #24
def _align_1image(resample,
                  image,
                  image_ext,
                  primary_cutouts,
                  seg,
                  image_sky=None,
                  wcslin=None,
                  fitgeom='general',
                  nclip=3,
                  sigma=3.0,
                  use_weights=True,
                  cc_type='NCC',
                  combine_seg_mask=True):
    img_info = {
        'file_name': image,
        'wcs_info': [],  # a list of: [extension, original WCS, corrected WCS]
        'fits_ext': [],  # image ext. from which an image cutout was extracted
        'image_cutouts': [],
        'driz_cutouts': [],
        'blotted_cutouts': [],  # non-shifted blot images of drizzled cutouts
        'ICC': []  # interlaced (oversampled) cross-correlation images
    }

    drz_sci_fname, drz_sci_ext = parse_file_name(resample.output_sci)
    with fits.open(drz_sci_fname) as hdulist:
        drz_sci = hdulist[drz_sci_ext].data
        drz_wcs = HSTWCS(hdulist, ext=drz_sci_ext)
        if 'EXPTIME' in hdulist[drz_sci_ext].header:
            drz_exptime = hdulist[drz_sci_ext].header['EXPTIME']
        else:
            drz_exptime = hdulist[0].header['EXPTIME']
        drz_units = hdulist[drz_sci_ext].header['BUNIT']
        drz_units = 'rate' if '/' in drz_units else 'counts'

    # get image data, info and create cutouts:
    with fits.open(image) as hdulist:
        for ext in image_ext:
            img_sci = hdulist[ext].data
            img_wcs = HSTWCS(hdulist, ext=ext)
            orig_img_wcs = img_wcs.deepcopy()
            img_exptime = hdulist[0].header['EXPTIME']
            img_units = hdulist[ext].header['BUNIT']
            img_units = 'rate' if '/' in img_units else 'counts'

            if image_sky is not None and image_sky[ext] is not None:
                img_sci -= image_sky[ext]

            imgct_ext, drzct_ext = cutout.create_cutouts(
                primary_cutouts,
                seg,
                drz_sci,
                drz_wcs,
                img_sci,
                img_wcs,
                drz_data_units=drz_units,
                drz_exptime=drz_exptime,
                flt_data_units=img_units,
                flt_exptime=img_exptime,
                combine_seg_mask=combine_seg_mask)

            img_info['wcs_info'].append([ext, orig_img_wcs, img_wcs])
            img_info['fits_ext'].extend(len(imgct_ext) * [ext])
            img_info['image_cutouts'].extend(imgct_ext)
            img_info['driz_cutouts'].extend(drzct_ext)

    # find linear fit:
    fit, interlaced_cc, nonshifted_blts = find_linear_fit(
        img_cutouts=imgct_ext,
        drz_cutouts=drzct_ext,
        wcslin=wcslin,
        fitgeom=fitgeom,
        nclip=nclip,
        sigma=sigma,
        use_weights=use_weights,
        cc_type=cc_type)

    img_info['blotted_cutouts'].extend(nonshifted_blts)
    img_info['ICC'].extend(interlaced_cc)

    print("\nComputed '{:s}' fit for image {:s}:".format(fitgeom, image))

    if fitgeom == 'shift':
        print("XSH: {:.4f}  YSH: {:.4f}".format(fit['offset'][0],
                                                fit['offset'][1]))

    elif fitgeom == 'rscale' and fit['proper']:
        print(
            "XSH: {:.4f}  YSH: {:.4f}    ROT: {:.10g}    SCALE: {:.6f}".format(
                fit['offset'][0], fit['offset'][1], fit['rot'],
                fit['scale'][0]))

    elif (fitgeom == 'general' or (fitgeom == 'rscale' and not fit['proper'])):
        print("XSH: {:.4f}  YSH: {:.4f}    PROPER ROT: {:.10g}    ".format(
            fit['offset'][0], fit['offset'][1], fit['rot']))

        print("<ROT>: {:.10g}  SKEW: {:.10g}    ROT_X: {:.10g}  "
              "ROT_Y: {:.10g}".format(fit['rotxy'][2], fit['skew'],
                                      fit['rotxy'][0], fit['rotxy'][1]))

        print("<SCALE>: {:.10g}  SCALE_X: {:.10g}  SCALE_Y: {:.10g}".format(
            fit['scale'][0], fit['scale'][1], fit['scale'][2]))

    print('FIT RMSE: {:.3g}    FIT MAE: {:.3g}    IMAGE FIT RMSE: {:.3g}\n'.
          format(fit['rmse'], fit['mae'], fit['irmse']))
    nmatch = fit['resids'].shape[0]
    print('Final solution based on {:d} objects.'.format(nmatch))

    # correct WCS:
    for ext, owcs, wcs in img_info['wcs_info']:
        correct_wcs(imwcs=wcs,
                    wcslin=drz_wcs,
                    rotmat=fit['matrix'],
                    shifts=fit['offset'],
                    fitgeom=fitgeom)

        print("\n------- ORIGINAL WCS for '{:s}[{}]': ------".format(
            image, ext))
        print(owcs)

        print("\n------- CORRECTED WCS for '{:s}[{}]': ------".format(
            image, ext))
        print(wcs)

    return fit, img_info
Example #25
    def _a_blot_image(self, image_to_blot, tempname, x_excess, y_excess,
                      interp):
        """
        Blot one image.

        That's just a simple wrapper around the blot task in astrodrizzle

        Parameters
        ----------
        image_to_blot: str
            the input image name, either the grism or direct drizzled image

        tempname: str
            the name of the output blotted image

        """
        # the drizzle coeff information for adriz is taken
        # from the self.data image,
        excess_x = 0
        excess_y = 0

        # use the current data as reference image for output
        # self.data comes from the header of the input grism or flux image
        # and is one of the input images used to make the drizzled
        # image_to_blot
        input_image = (self.data).split("[")[0]
        bunit = fits.getval(image_to_blot, 'BUNIT')

        flt_header = fits.getheader(input_image)
        flt_wcs = HSTWCS(self.data)

        # now look at the image to blot, this is a drizzled image
        ftype = fileutil.isFits(image_to_blot)[1]

        if (ftype == 'mef'):
            blot_wcs = HSTWCS(image_to_blot,
                              ext=(str(self.fcube_info["ext_nam"]),
                                   str(self.fcube_info["ext_ver"])))
            image_data = fits.getdata(image_to_blot,
                                      extname=str(self.fcube_info["ext_nam"]),
                                      extver=int(self.fcube_info["ext_ver"]))

        elif (ftype == 'simple'):
            blot_wcs = HSTWCS(image_to_blot)  # assume simple
            image_data = fits.getdata(image_to_blot)

        else:
            raise IOError("File type of fits image is not "
                          "supported {0:s}".format(image_to_blot))

        # edit the wcs header information to add any dim_info shifts that
        # we need, expanding the size of the output image
        # make sure this gets saved to the output extension header.
        # The lambda image will be bigger than the segment image
        if x_excess > 0:
            excess_x = int(flt_wcs.naxis1 + x_excess * 2.)
            flt_wcs.naxis1 = excess_x
            crpix = flt_wcs.wcs.crpix
            newx = int(crpix[0]) + x_excess
            flt_wcs.wcs.crpix = np.array([newx, int(crpix[1])])
            flt_wcs.sip.crpix[0] = newx

        if y_excess > 0:
            excess_y = int(flt_wcs.naxis2 + y_excess * 2.)
            flt_wcs.naxis2 = excess_y
            crpix = flt_wcs.wcs.crpix
            newy = int(crpix[1]) + y_excess
            flt_wcs.wcs.crpix = np.array([int(crpix[0]), newy])
            flt_wcs.sip.crpix[1] = newy

        # outimage is just the data array
        outimage = astrodrizzle.ablot.do_blot(image_data.astype(np.float32),
                                              blot_wcs,
                                              flt_wcs,
                                              1.,
                                              interp=interp,
                                              sinscl=1.,
                                              coeffs=True,
                                              wcsmap=None,
                                              stepsize=10)

        # update the flt_header with the flt_wcs information I created
        flt_header['CRPIX1'] = flt_wcs.wcs.crpix[0]
        flt_header['CRPIX2'] = flt_wcs.wcs.crpix[1]

        try:
            newimage = fits.PrimaryHDU()
            newimage.data = outimage
            newimage.header = flt_header
            newimage.header['BUNIT'] = bunit
            newimage.header.update(flt_wcs.to_header())
            newimage.verify('silentfix')
            newimage.writeto(tempname)
        except:
            raise IOError("Problem writing fits image {0:s}".format(tempname))
Example #26
def create_sextractor_like_sourcelists(source_filename,
                                       catalog_filename,
                                       param_dict,
                                       se_debug=False):
    """Use photutils to find sources in image based on segmentation.

    Parameters
    ----------
    source_filename : string
        Filename of the "white light" drizzled image (aka the total detection product) which
        is used for the detection of sources

    catalog_filename : string
        Name of the output source catalog for the total detection product

    param_dict : dictionary
        dictionary of drizzle, source finding, and photometric parameters

    se_debug : bool, optional
        Specify whether or not to plot the image and segmentation image for
        visualization and debugging purposes

    Returns
    -------
    segm : `photutils.segmentation.SegmentationImage`
        Two-dimensional segmentation image where found source regions are labeled with
        unique, non-zero positive integers.

    kernel : `~astropy.convolution.Kernel2D`
        Two-dimensional kernel used to smooth the image prior to source detection

    bkg : `~photutils.background.Background2D` or None
        A background map based upon the `~photutils.background.SExtractorBackground`
        estimator

    bkg_rms_mean : float
        Mean bkg.background FIX

    """

    # Open the "white light" image and get the SCI image data
    imghdu = fits.open(source_filename)
    imgarr = imghdu['sci', 1].data

    # Get the HSTWCS object from the first extension
    imgwcs = HSTWCS(imghdu, 1)

    # Get header information to annotate the output catalogs
    keyword_dict = _get_header_data(imghdu)

    # Get the instrument/detector-specific values from the param_dict
    fwhm = param_dict["sourcex"]["fwhm"]
    size_source_box = param_dict["sourcex"]["source_box"]
    threshold_flag = param_dict["sourcex"]["thresh"]

    # Report configuration values to log
    log.info("{}".format("=" * 80))
    log.info("")
    log.info(
        "SExtractor-like source finding settings for Photutils segmentation")
    log.info("Total Detection Product - Input Parameters")
    log.info("FWHM: {}".format(fwhm))
    log.info("size_source_box: {}".format(size_source_box))
    log.info("threshold_flag: {}".format(threshold_flag))
    log.info("")
    log.info("{}".format("=" * 80))

    # Only use a single kernel for now
    kernel_list = [Gaussian2DKernel, MexicanHat2DKernel]
    kernel_in_use = kernel_list[0]

    bkg, bkg_dao_rms, threshold = _compute_background(
        imgarr, nsigma=5., threshold_flag=threshold_flag)

    # FIX imgarr should be background subtracted, sextractor uses the filtered_data image
    imgarr_bkgsub = imgarr - bkg.background

    # *** FIX: should size_source_box size be used in all these places? ***
    # Create a 2D filter kernel - this will be used to smooth the input
    # image prior to thresholding in detect_sources().
    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = kernel_in_use(sigma,
                           x_size=size_source_box,
                           y_size=size_source_box)
    kernel.normalize()

    # Source segmentation/extraction
    # If the threshold includes the background level, then the input image
    # should NOT be background subtracted.
    # Note: SExtractor has "connectivity=8" which is the default for this function
    segm = detect_sources(imgarr,
                          threshold,
                          npixels=size_source_box,
                          filter_kernel=kernel)

    # For debugging purposes...
    if se_debug:
        # Write out a catalog which can be used as an overlay for image in ds9
        cat = source_properties(imgarr_bkgsub,
                                segm,
                                background=bkg.background,
                                filter_kernel=kernel,
                                wcs=imgwcs)
        table = cat.to_table()

        # Copy out only the X and Y coordinates to a "debug table" and
        # cast as an Astropy Table
        tbl = Table(table["xcentroid", "ycentroid"])

        # Construct the debug output filename and write the catalog
        indx = catalog_filename.find("ecsv")
        outname = catalog_filename[0:indx] + "reg"

        tbl["xcentroid"].info.format = ".10f"  # optional format
        tbl["ycentroid"].info.format = ".10f"

        # Add one to the X and Y table values to put the data onto a one-based system,
        # particularly for display with DS9
        tbl["xcentroid"] = tbl["xcentroid"] + 1
        tbl["ycentroid"] = tbl["ycentroid"] + 1
        tbl.write(outname, format="ascii.commented_header")
        log.info("Wrote debug source catalog: {}".format(outname))
        """
        # Generate a graphic of the image and the segmented image
        norm = ImageNormalize(stretch=SqrtStretch())
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 12.5))
        ax1.imshow(imgarr, origin="lower", cmap="Greys_r", norm=norm)
        ax1.set_title("Data")
        ax2.imshow(segm, origin="lower", cmap=segm.cmap(random_state=12345))
        ax2.set_title("Segmentation Image")
        plt.show()
        """

    # TROUBLESOME at this time
    # Deblending is a combination of multi-thresholding and watershed
    # segmentation. Sextractor uses a multi-thresholding technique.
    # npixels = number of connected pixels in source
    # npixels and filter_kernel should match those used by detect_sources()
    # Note: SExtractor has "connectivity=8" which is the default for this function
    """
    segm = deblend_sources(imgarr, segm, npixels=size_source_box,
                           filter_kernel=kernel, nlevels=32,
                           contrast=0.005)
    print("after deblend. ", segm)
    """
    """
    if se_debug:
        # Generate a graphic of the image and the segmented image
        norm = ImageNormalize(stretch=SqrtStretch())
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 12.5))
        ax1.imshow(imgarr, origin="lower", cmap="Greys_r", norm=norm)
        ax1.set_title("Data")
        ax2.imshow(segm, origin="lower", cmap=segm.cmap(random_state=12345))
        ax2.set_title("Segmentation Image")
        plt.show()
    """

    # Regenerate the source catalog with presumably now only good sources
    seg_cat = source_properties(imgarr_bkgsub,
                                segm,
                                background=bkg.background,
                                filter_kernel=kernel,
                                wcs=imgwcs)

    _write_catalog(seg_cat, keyword_dict, catalog_filename)

    return segm, kernel, bkg_dao_rms
Example #27
def measure_source_properties(segm, kernel, source_filename, catalog_filename,
                              param_dict):
    """Use the positions of the sources identified in the white light image to
    measure properties of these sources in the filter images

    An instrument/detector combination may have multiple filter-level products.
    This routine is called for each filter image which is then measured to generate
    a filter-level source catalog based on object positions measured in the total
    detection product image.

    Parameters
    ----------
    segm : `~astropy.photutils.segmentation` Segmentation image
        Two-dimensional image of labeled source regions based on the "white light" drizzled product

    kernel : `~astropy.convolution`
        Two dimensional function of a specified FWHM used to smooth the image and
        used in the detection of sources as well as for the determination of the
        source properties (this routine)

    source_filename : string
        Filename of the filter drizzled image (aka the filter detection product) which
        is used for the measurement of properties of the previously found sources

    catalog_filename : string
        Name of the output source catalog for the filter detection product

    param_dict : dictionary
        dictionary of drizzle, source finding, and photometric parameters

    Returns
    -------

    """

    # Open the filter-level image
    imghdu = fits.open(source_filename)
    imgarr = imghdu['sci', 1].data

    # Get the HSTWCS object from the first extension
    imgwcs = HSTWCS(imghdu, 1)

    # Get header information to annotate the output catalogs
    keyword_dict = _get_header_data(imghdu, product="fdp")

    # Get the instrument/detector-specific values from the param_dict
    fwhm = param_dict["sourcex"]["fwhm"]
    size_source_box = param_dict["sourcex"]["source_box"]
    threshold_flag = param_dict["sourcex"]["thresh"]

    # Report configuration values to log
    log.info("{}".format("=" * 80))
    log.info("")
    log.info(
        "SExtractor-like source property measurements based on Photutils segmentation"
    )
    log.info("Filter Level Product - Input Parameters")
    log.info("FWHM: {}".format(fwhm))
    log.info("size_source_box: {}".format(size_source_box))
    log.info("threshold_flag: {}".format(threshold_flag))
    log.info("")
    log.info("{}".format("=" * 80))

    # The data needs to be background subtracted when computing the source properties
    bkg, _, _ = _compute_background(imgarr,
                                    nsigma=5.,
                                    threshold_flag=threshold_flag)

    imgarr_bkgsub = imgarr - bkg.background

    # Compute source properties...
    seg_cat = source_properties(imgarr_bkgsub,
                                segm,
                                background=bkg.background,
                                filter_kernel=kernel,
                                wcs=imgwcs)
    print(Table(seg_cat.to_table()).colnames)

    # Write the source catalog
    _write_catalog(seg_cat, keyword_dict, catalog_filename, product="fdp")
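A hedged sketch of how the two segmentation routines above chain together: detect on the total ("white light") product, then measure the same segments on a filter-level product. The filenames and the contents of param_dict are assumptions:

# Detect sources once on the total detection product, then measure them on a
# filter-level product using the returned segmentation image and kernel.
segm, kernel, bkg_dao_rms = create_sextractor_like_sourcelists(
    'hst_total_detection_drz.fits',      # hypothetical white-light image
    'hst_total_detection_cat.ecsv',      # output total catalog
    param_dict,                          # assumed instrument/detector parameters
    se_debug=False)
measure_source_properties(segm, kernel,
                          'hst_f606w_drz.fits',       # hypothetical filter image
                          'hst_f606w_cat.ecsv',       # output filter catalog
                          param_dict)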
Example #28
class MakeCat(object):
    def __init__(self, refimg):
        super(MakeCat, self).__init__()
        self.refimg = str(refimg)
        self.refwcs = HSTWCS(self.refimg)
        
    def getInstDet(self, imgfile):
        """Get the instrument/detector of an HST image (e.g. acswfc)"""
        hdr = fits.getheader(imgfile)
        return '%s%s' % (hdr['instrume'].lower(), hdr['detector'].lower())

    def findSources(self, inputfile, outputfile, instdet, weightfile=None, extref=False, **sconfig):
        """Finds objects in image"""
        # Set up SExtractor
        sex = sextractor.SExtractor()
        # Load the default configuration
        if instdet == 'acswfc' or instdet == 'wfc3uvis':
            for k, v in _sex_config_ACSWFC.items():
                sex.config[k] = v
        if instdet == 'wfc3ir':
            for k, v in _sex_config_WFC3IR.items():
                sex.config[k] = v
        if sconfig:
            # Load any runtime configuration
            for k, v in sconfig.items():
                sex.config[k] = v
        if weightfile:
            sex.config['WEIGHT_IMAGE'] = weightfile
            sex.config['WEIGHT_TYPE'] = 'MAP_WEIGHT'
            sex.config['WEIGHT_GAIN'] = 'N'
        sex.config['CATALOG_NAME'] = outputfile
        # Load default parameters
        sex.config['PARAMETERS_LIST'] = []
        for p in _sex_parms:
            sex.config['PARAMETERS_LIST'].append(p)
        # Run SExtractor
        sex.run(inputfile)
        cfg = _sa_config[instdet]
        low_limit = cfg['low_limit']
        hi_limit = cfg['hi_limit']
        objectlist = []
        for l in [i.split() for i in open(outputfile).readlines()]:
            if l[0] != '#':
                x = float(l[1])
                y = float(l[2])
                ra = float(l[3])
                if l[4].startswith('+'):
                    dec = float(l[4][1:])
                else:
                    dec = float(l[4])
                aa = float(l[5])
                ba = float(l[6])
                r = ba / aa
                m = float(l[7])
                f = float(l[8])
                fwhm = float(l[9])

                if min(2.3 * ba, fwhm) >= low_limit and max(2.3 * aa, fwhm) < hi_limit and r > cfg['min_axis_ratio']:
                    objectlist.append(Object(x, y, ra, dec, r, m, f))
                    
        return objectlist

    def removeCloseSources(self, objectlist):
        """Removes objects from catalog with multiple close detections"""
        for objecti in objectlist:
            for objectj in objectlist:
                dist = ((objecti.x - objectj.x) ** 2 + (objecti.y - objectj.y) ** 2) ** 0.5
                if dist < 10 and dist > 0:
                    if objecti.mag < objectj.mag:
                        objectj.nextToBig = 1
                    else:
                        objecti.nextToBig = 1
        objectlist_keep = []
        for objecti in objectlist:
            if not objecti.nextToBig:
                objectlist_keep.append(objecti)
            else:
                print('Excluding object at %i %i' % (objecti.x, objecti.y))
        return objectlist_keep
    
    
    def makeCat(self, imgfile, instdet, weightfile=None, extref=False):
        """Makes a catalog of objects to be used for input to superalign and creates a DS9 region file of objects"""
        
        imgfile_cat = '%s.cat' % imgfile.replace('.fits', '')
        imgfile_reg = '%s.reg' % imgfile.replace('.fits', '')
    
        o_radec = []
        ext = 0
        objectlist = self.findSources('%s[%s]' % (imgfile, ext), imgfile_cat, instdet, weightfile, extref=extref)
        cleanobjectlist = self.removeCloseSources(objectlist)
        print('Found %s sources' % len(cleanobjectlist))
        wcs = HSTWCS(str(imgfile))
        for obj in cleanobjectlist:
            sky = wcs.all_pix2world(np.array([[obj.x, obj.y]]), 1)
            o_radec.append([obj.ra[0], obj.dec[0]])
            obj.ra = sky[0][0]
            obj.dec = sky[0][1]
        
        # Write out a ds9 region file of object selected for alignment
        regout = open(imgfile_reg, 'w')
        regout.write('global color=green font="helvetica 8 normal" edit=1 move=1 delete=1 include=1 fixed=0\nfk5\n')
        for i,rd in enumerate(o_radec):
            oid = i+1
            regout.write('circle(%s,%s,%s") # color=%s text={%s}\n' % (rd[0], rd[1], 0.5, 'red', oid))
        regout.close()
    
        # Now we need to write out the catalog in the reference image coords in arcseconds with respect to center of the image
        catout = open(imgfile_cat, 'w')
        for i,obj in enumerate(cleanobjectlist):
            oid = i+1
            catout.write('%i %.9f %.9f %.4f %.4f %.4f\n' % (oid, obj.ra, obj.dec, obj.x, obj.y, obj.mag))
        catout.close()
        return
    
    def makeSACat(self, imgfile, extref=False):
        """
        Makes a catalog of objects to be used for input to superalign from a external reference catalog.
        Catalog should be of form: ID RA(deg) Dec(deg) Mag
        Output appends _sa
        """
        if extref:
            wcs = self.refwcs
        else:
            wcs = HSTWCS(imgfile)
        cat = imgfile.replace('.fits', '.cat')
        outcat = imgfile.replace('.fits', '_sa.cat')
        data = ascii.read(cat, names=['id', 'ra', 'dec', 'x', 'y', 'mag'])
        arcs = (wcs.all_world2pix(list(zip(data['ra'], data['dec'])), 1) - [wcs.naxis1 / 2, wcs.naxis2 / 2]) * wcs.pscale
        ascii.write([data['id'], arcs[:, 0], arcs[:, 1], data['mag']], outcat, format='no_header', overwrite=True)
        return
    
    def makeSACatExtRef(self, refcat, outcat):
        """
        Makes a catalog of objects to be used for input to superalign from a external reference catalog.
        Catalog should be of form: ID RA(deg) Dec(deg) Mag
        """
        data = ascii.read(refcat, names=['id', 'ra', 'dec', 'x', 'y', 'mag'])
        arcs = (self.refwcs.all_world2pix(list(zip(data['ra'], data['dec'])), 1) - [self.refwcs.naxis1 / 2, self.refwcs.naxis2 / 2]) * self.refwcs.pscale
        ascii.write([data['id'], arcs[:, 0], arcs[:, 1], data['mag']], outcat, format='no_header', overwrite=True)
        return
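
The two makeSACat* methods above reduce each catalog position to arcsecond offsets from the centre of the reference image, which is the input convention superalign expects. Below is a minimal standalone sketch of that projection; it relies only on the HSTWCS attributes already used above (all_world2pix, naxis1, naxis2, pscale), while the helper name and the example call are hypothetical:

import numpy as np

def radec_to_center_offsets(wcs, ra, dec):
    # pixel positions of the sources in the reference frame (1-based origin)
    pix = wcs.all_world2pix(np.column_stack([ra, dec]), 1)
    # offsets from the image centre, scaled from pixels to arcseconds
    return (pix - np.array([wcs.naxis1 / 2, wcs.naxis2 / 2])) * wcs.pscale

# e.g. offsets = radec_to_center_offsets(HSTWCS('reference_drz.fits'), data['ra'], data['dec'])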
Example #29
    def _a_blot_segment_image(self, image_to_blot, tempname, x_excess,
                              y_excess, interp):
        """Blot the segmentation or other nondrizzled image as if it were
        assume self.grism_image is always used as the source wcs reference
        Thats just a simple wrapper around the task blot in astrodrizzle

        Parameters
        ----------
        image_to_blot: str
            the input image name, either the grism or direct drizzled image
        tempname: str
            the name of the output blotted image
        x_excess, y_excess: int
            number of extra border pixels to add to the output frame in x and y
        interp: str
            interpolation method passed on to blot (e.g. 'nearest', 'poly5')

        Notes
        -----
        exposure time is hard coded to 1 since it was made from the
        drizzled image and we don't want the id numbers rescaled by the
        exposure time that was used for blotting

        """
        excess_x = 0
        excess_y = 0

        # the drizzle coeff information for adriz is taken
        # from the self.data image
        # use the current data as reference image
        input_image = (self.data).split("[")[0]
        flt_header = fits.getheader(input_image)
        flt_wcs = HSTWCS(self.data)

        # check to see if this is a simple fits or MEF and grab
        # the science information.
        ftype = fileutil.isFits(self.grism_image_name)[1]

        if ftype == 'mef':
            grism_wcs = HSTWCS(self.grism_image_name,
                               ext=(str(self.fcube_info["ext_nam"]),
                                    self.fcube_info["ext_ver"]))
        elif ftype == 'simple':
            grism_wcs = HSTWCS(self.grism_image_name)
        else:
            raise IOError("File type of fits image is not "
                          "supported {0:s}".format(image_to_blot))

        ftype = fileutil.isFits(image_to_blot)[1]
        if ftype == 'mef':
            image_data = fits.getdata(image_to_blot,
                                      ext=(str(self.fcube_info["ext_nam"]),
                                           self.fcube_info["ext_ver"]))
        elif ftype == 'simple':
            image_data = fits.getdata(image_to_blot)
        else:
            raise IOError("Input image is not a supported FITS "
                          "type: {0:s}".format(image_to_blot))

        # edit the wcs header information to add any dim_info shifts that we
        # need; the segment image needs to cover the same sky area as the cut
        # without the added pixels
        if x_excess > 0:
            excess_x = int(flt_wcs.naxis1 + x_excess * 2.)
            flt_wcs.naxis1 = excess_x
            crpix = flt_wcs.wcs.crpix
            newx = int(crpix[0]) + x_excess
            flt_wcs.wcs.crpix = np.array([newx, crpix[1]])
            flt_wcs.sip.crpix[0] = newx

        if y_excess > 0:
            excess_y = int(flt_wcs.naxis2 + y_excess * 2.)
            flt_wcs.naxis2 = excess_y
            crpix = flt_wcs.wcs.crpix
            newy = int(crpix[1]) + y_excess
            flt_wcs.wcs.crpix = np.array([int(crpix[0]), newy])
            flt_wcs.sip.crpix[1] = newy

        # returns a numpy.ndarray which is just the data
        outimage = astrodrizzle.ablot.do_blot(image_data.astype(np.float32),
                                              grism_wcs,
                                              flt_wcs,
                                              1.,
                                              interp=interp,
                                              sinscl=1.,
                                              coeffs=True,
                                              wcsmap=None,
                                              stepsize=10)

        # update the flt_header with the flt_wcs information I created
        flt_header['CRPIX1'] = flt_wcs.wcs.crpix[0]
        flt_header['CRPIX2'] = flt_wcs.wcs.crpix[1]

        # if the input flt was an MEF we need to write an MEF out
        try:
            newimage = fits.PrimaryHDU()
            newimage.data = outimage
            newimage.header = flt_header
            newimage.header.update(flt_wcs.to_header())
            newimage.verify('silentfix')
            newimage.writeto(tempname)
        except Exception:
            raise IOError("Problem writing fits image {0:s}".format(tempname))
Example #30
class MakeCat(object):
    def __init__(self, refimg):
        super(MakeCat, self).__init__()
        self.refimg = str(refimg)
        self.refwcs = HSTWCS(self.refimg)

    def getInstDet(self, imgfile):
        """Get the instrument/detector of an HST image (e.g. acswfc)"""
        hdr = fits.getheader(imgfile)
        return '%s%s' % (hdr['instrume'].lower(), hdr['detector'].lower())

    def findSources(self,
                    inputfile,
                    outputfile,
                    instdet,
                    weightfile=None,
                    extref=False,
                    **sconfig):
        """Finds objects in image"""
        # Set up SExtractor
        sex = sextractor.SExtractor()
        # Load the default configuration
        if instdet == 'acswfc' or instdet == 'wfc3uvis':
            for k, v in _sex_config_ACSWFC.items():
                sex.config[k] = v
        if instdet == 'wfc3ir':
            for k, v in _sex_config_WFC3IR.items():
                sex.config[k] = v
        if sconfig:
            # Load any runtime configuration
            for k, v in sconfig.items():
                sex.config[k] = v
        if weightfile:
            sex.config['WEIGHT_IMAGE'] = weightfile
            sex.config['WEIGHT_TYPE'] = 'MAP_WEIGHT'
            sex.config['WEIGHT_GAIN'] = 'N'
        sex.config['CATALOG_NAME'] = outputfile
        # Load default parameters
        sex.config['PARAMETERS_LIST'] = []
        for p in _sex_parms:
            sex.config['PARAMETERS_LIST'].append(p)
        # Run SExtractor
        sex.run(inputfile)
        cfg = _sa_config[instdet]
        low_limit = cfg['low_limit']
        hi_limit = cfg['hi_limit']
        objectlist = []
        for l in [i.split() for i in open(outputfile).readlines()]:
            if l[0] != '#':
                x = float(l[1])
                y = float(l[2])
                ra = float(l[3])
                if l[4].startswith('+'):
                    dec = float(l[4][1:])
                else:
                    dec = float(l[4])
                aa = float(l[5])
                ba = float(l[6])
                r = ba / aa
                m = float(l[7])
                f = float(l[8])
                fwhm = float(l[9])

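                # keep compact, roughly round detections: the size estimate
                # (2.3 * axis length, roughly a FWHM, or the measured FWHM)
                # must fall inside the instrument-specific limits and the
                # axis ratio must exceed min_axis_ratio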
                if min(2.3 * ba, fwhm) >= low_limit and max(
                        2.3 * aa,
                        fwhm) < hi_limit and r > cfg['min_axis_ratio']:
                    objectlist.append(Object(x, y, ra, dec, r, m, f))

        return objectlist

    def removeCloseSources(self, objectlist):
        """Removes objects from catalog with multiple close detections"""
        for objecti in objectlist:
            for objectj in objectlist:
                dist = ((objecti.x - objectj.x)**2 +
                        (objecti.y - objectj.y)**2)**0.5
                if dist < 10 and dist > 0:
                    if objecti.mag < objectj.mag:
                        objectj.nextToBig = 1
                    else:
                        objecti.nextToBig = 1
        objectlist_keep = []
        for objecti in objectlist:
            if not objecti.nextToBig:
                objectlist_keep.append(objecti)
            else:
                print('Excluding object at %i %i' % (objecti.x, objecti.y))
        return objectlist_keep

    def makeCat(self, imgfile, instdet, weightfile=None, extref=False):
        """Makes a catalog of objects to be used for input to superalign and creates a DS9 region file of objects"""

        imgfile_cat = '%s_all.cat' % imgfile.replace('.fits', '')
        imgfile_reg = '%s_all.reg' % imgfile.replace('.fits', '')

        o_radec = []
        ext = 0
        objectlist = self.findSources('%s[%s]' % (imgfile, ext),
                                      imgfile_cat,
                                      instdet,
                                      weightfile,
                                      extref=extref)
        cleanobjectlist = self.removeCloseSources(objectlist)
        print('Found %s sources' % len(cleanobjectlist))
        wcs = HSTWCS(str(imgfile))
        for obj in cleanobjectlist:
            sky = wcs.all_pix2world(np.array([[obj.x, obj.y]]), 1)
            o_radec.append([obj.ra[0], obj.dec[0]])
            obj.ra = sky[0][0]
            obj.dec = sky[0][1]

        # Write out a ds9 region file of object selected for alignment
        regout = open(imgfile_reg, 'w')
        regout.write(
            'global color=green font="helvetica 8 normal" edit=1 move=1 delete=1 include=1 fixed=0\nfk5\n'
        )
        for i, rd in enumerate(o_radec):
            oid = i + 1
            regout.write('circle(%s,%s,%s") # color=%s text={%s}\n' %
                         (rd[0], rd[1], 0.5, 'red', oid))
        regout.close()

        # Now we need to write out the catalog in the reference image coords in arcseconds with respect to center of the image
        catout = open(imgfile_cat, 'w')
        for i, obj in enumerate(cleanobjectlist):
            oid = i + 1
            catout.write('%i %.9f %.9f %.4f %.4f %.4f\n' %
                         (oid, obj.ra, obj.dec, obj.x, obj.y, obj.mag))
        catout.close()
        return

    def makeSACat(self, imgfile, extref=False):
        """
        Makes a catalog of objects to be used for input to superalign from a external reference catalog.
        Catalog should be of form: ID RA(deg) Dec(deg) Mag
        Output appends _sa
        """
        if extref:
            wcs = self.refwcs
        else:
            wcs = HSTWCS(imgfile)
        cat = imgfile.replace('.fits', '.cat')
        outcat = imgfile.replace('.fits', '_sa.cat')
        data = ascii.read(cat, names=['id', 'ra', 'dec', 'x', 'y', 'mag'])
        arcs = (wcs.all_world2pix(list(zip(data['ra'], data['dec'])), 1) -
                [wcs.naxis1 / 2, wcs.naxis2 / 2]) * wcs.pscale
        ascii.write([data['id'], arcs[:, 0], arcs[:, 1], data['mag']],
                    outcat,
                    format='no_header',
                    overwrite=True)
        return

    def makeSACatExtRef(self, refcat, outcat):
        """
        Makes a catalog of objects to be used for input to superalign from a external reference catalog.
        Catalog should be of form: ID RA(deg) Dec(deg) Mag
        """
        data = ascii.read(refcat, names=['id', 'ra', 'dec', 'x', 'y', 'mag'])
        arcs = (
            self.refwcs.all_world2pix(list(zip(data['ra'], data['dec'])), 1) -
            [self.refwcs.naxis1 / 2, self.refwcs.naxis2 / 2
             ]) * self.refwcs.pscale
        ascii.write([data['id'], arcs[:, 0], arcs[:, 1], data['mag']],
                    outcat,
                    format='no_header',
                    overwrite=True)
        return
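
A hedged usage sketch of the MakeCat class above; the file names are hypothetical, and the SExtractor configuration dictionaries (_sex_config_*, _sex_parms, _sa_config) are assumed to be defined elsewhere in the module, as in the snippet:

# hypothetical file names: a drizzled reference image plus one FLT exposure
mc = MakeCat('reference_drz.fits')
instdet = mc.getInstDet('ib3p11xyq_flt.fits')   # e.g. 'wfc3ir'
mc.makeCat('ib3p11xyq_flt.fits', instdet)       # writes *_all.cat and *_all.reg
mc.makeSACat('ib3p11xyq_flt.fits')              # projects *.cat into *_sa.cat offsets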
Example #31
def makecorr(fname, allowed_corr):
    """
    Purpose
    =======
    Applies corrections to the WCS of a single file

    :Parameters:
    `fname`: string
             file name
    `allowed_corr`: list
             list of corrections to be applied
    """
    logger.info("Allowed corrections: {0}".format(allowed_corr))
    f = fits.open(fname, mode='update')
    #Determine the reference chip and create the reference HSTWCS object
    nrefchip, nrefext = getNrefchip(f)
    wcsutil.restoreWCS(f, nrefext, wcskey='O')
    rwcs = HSTWCS(fobj=f, ext=nrefext)
    rwcs.readModel(update=True, header=f[nrefext].header)

    if 'DET2IMCorr' in allowed_corr:
        kw2update = det2im.DET2IMCorr.updateWCS(f)
        for kw in kw2update:
            f[1].header[kw] = kw2update[kw]

    for i in range(1, len(f)):
        extn = f[i]

        if 'extname' in extn.header:
            extname = extn.header['extname'].lower()
            if extname == 'sci':
                wcsutil.restoreWCS(f, ext=i, wcskey='O')
                sciextver = extn.header['extver']
                ref_wcs = rwcs.deepcopy()
                hdr = extn.header
                ext_wcs = HSTWCS(fobj=f, ext=i)
                ### check if it exists first!!!
                # 'O ' can be safely archived again because it has been restored first.
                wcsutil.archiveWCS(f,
                                   ext=i,
                                   wcskey="O",
                                   wcsname="OPUS",
                                   reusekey=True)
                ext_wcs.readModel(update=True, header=hdr)
                for c in allowed_corr:
                    if c != 'NPOLCorr' and c != 'DET2IMCorr':
                        corr_klass = corrections.__getattribute__(c)
                        kw2update = corr_klass.updateWCS(ext_wcs, ref_wcs)
                        for kw in kw2update:
                            hdr[kw] = kw2update[kw]
                # give the primary WCS a WCSNAME value
                idcname = f[0].header.get('IDCTAB', " ")
                if idcname.strip() and 'idc.fits' in idcname:
                    wname = ''.join([
                        'IDC_',
                        utils.extract_rootname(idcname, suffix='_idc')
                    ])
                else:
                    wname = " "
                hdr['WCSNAME'] = wname

            elif extname in ['err', 'dq', 'sdq', 'samp', 'time']:
                cextver = extn.header['extver']
                if cextver == sciextver:
                    hdr = f[('SCI', sciextver)].header
                    w = pywcs.WCS(hdr, f)
                    copyWCS(w, extn.header)

            else:
                continue

    if 'NPOLCorr' in allowed_corr:
        kw2update = npol.NPOLCorr.updateWCS(f)
        for kw in kw2update:
            f[1].header[kw] = kw2update[kw]
    # Finally record the version of the software which updated the WCS
    if 'HISTORY' in f[0].header:
        f[0].header.set('UPWCSVER',
                        value=stwcs.__version__,
                        comment="Version of STWCS used to updated the WCS",
                        before='HISTORY')
        f[0].header.set('PYWCSVER',
                        value=astropy.__version__,
                        comment="Version of PYWCS used to updated the WCS",
                        before='HISTORY')
    elif 'ASN_MTYP' in f[0].header:
        f[0].header.set('UPWCSVER',
                        value=stwcs.__version__,
                        comment="Version of STWCS used to updated the WCS",
                        after='ASN_MTYP')
        f[0].header.set('PYWCSVER',
                        value=astropy.__version__,
                        comment="Version of PYWCS used to updated the WCS",
                        after='ASN_MTYP')
    else:
        # Find index of last non-blank card, and insert this new keyword after that card
        for i in range(len(f[0].header) - 1, 0, -1):
            if f[0].header[i].strip() != '':
                break
        f[0].header.set('UPWCSVER',
                        stwcs.__version__,
                        "Version of STWCS used to update the WCS",
                        after=i)
        f[0].header.set('PYWCSVER',
                        astropy.__version__,
                        "Version of PYWCS used to update the WCS",
                        after=i)
    # add additional keywords to be used by headerlets
    distdict = utils.construct_distname(f, rwcs)
    f[0].header['DISTNAME'] = distdict['DISTNAME']
    f[0].header['SIPNAME'] = distdict['SIPNAME']
    # Make sure NEXTEND keyword remains accurate
    f[0].header['NEXTEND'] = len(f) - 1
    f.close()
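
A hedged usage sketch of makecorr; the file name is hypothetical and the correction list is limited to names that actually appear in the function body (in practice the list is normally assembled by the calling updatewcs machinery):

# hypothetical FLT file; apply only the distortion-table corrections referenced above
makecorr('ib3p11xyq_flt.fits', ['DET2IMCorr', 'NPOLCorr'])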
Example #32
 def __init__(self, refimg):
     super(MakeCat, self).__init__()
     self.refimg = str(refimg)
     self.refwcs = HSTWCS(self.refimg)
Example #33
def characterize_gaia_distribution(hap_obj, log_level=logutil.logging.NOTSET):
    """Statistically describe distribution of GAIA sources in footprint.

    Computes the following statistics and writes them to a json file:

    - Number of GAIA sources
    - X centroid location
    - Y centroid location
    - X offset of centroid from image center
    - Y offset of centroid from image center
    - X standard deviation
    - Y standard deviation
    - minimum closest neighbor distance
    - maximum closest neighbor distance
    - mean closest neighbor distance
    - standard deviation of closest neighbor distances

    Parameters
    ----------
    hap_obj : drizzlepac.hlautils.Product.FilterProduct
        hap product object to process

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and written to the .log file.
        Default value is 'NOTSET'.

    Returns
    -------
    Nothing
    """
    log.setLevel(log_level)

    # get table of GAIA sources in footprint
    gaia_table = generate_gaia_catalog(
        hap_obj, columns_to_remove=['mag', 'objID', 'GaiaID'])

    # if log_level is either 'DEBUG' or 'NOTSET', write out GAIA sources to DS9 region file
    if log_level <= logutil.logging.DEBUG:
        reg_file = "{}_gaia_sources.reg".format(hap_obj.drizzle_filename[:-9])
        gaia_table.write(reg_file, format='ascii.csv')
        log.debug(
            "Wrote GAIA source RA and Dec positions to DS9 region file '{}'".
            format(reg_file))

    # convert RA, Dec to image X, Y
    outwcs = HSTWCS(hap_obj.drizzle_filename + "[1]")
    x, y = outwcs.all_world2pix(gaia_table['RA'], gaia_table['DEC'], 1)

    # compute stats for the distribution
    centroid = [np.mean(x), np.mean(y)]
    centroid_offset = []
    for idx in range(0, 2):
        centroid_offset.append(outwcs.wcs.crpix[idx] - centroid[idx])
    std_dev = [np.std(x), np.std(y)]

    # Find straight-line distance to the closest neighbor for each GAIA source
    xys = np.column_stack([x, y])  # shape (N, 2): one (x, y) pair per source
    tree = KDTree(xys)
    neighborhood = tree.query(xys, 2)
    min_seps = np.empty([0])
    for sep_pair in neighborhood[0]:
        min_seps = np.append(min_seps, sep_pair[1])

    # add statistics to out_dict
    out_dict = collections.OrderedDict()
    out_dict["units"] = "pixels"
    out_dict["Number of GAIA sources"] = len(gaia_table)
    axis_list = ["X", "Y"]
    title_list = [
        "centroid", "offset of centroid from image center",
        "standard deviation"
    ]
    for item_value, item_title in zip([centroid, centroid_offset, std_dev],
                                      title_list):
        for axis_item in enumerate(axis_list):
            log.info("{} {} ({}): {}".format(axis_item[1], item_title,
                                             out_dict["units"],
                                             item_value[axis_item[0]]))
            out_dict["{} {}".format(axis_item[1],
                                    item_title)] = item_value[axis_item[0]]
    min_sep_stats = [
        min_seps.min(),
        min_seps.max(),
        min_seps.mean(),
        min_seps.std()
    ]
    min_sep_title_list = [
        "minimum closest neighbor distance",
        "maximum closest neighbor distance", "mean closest neighbor distance",
        "standard deviation of closest neighbor distances"
    ]
    for item_value, item_title in zip(min_sep_stats, min_sep_title_list):
        log.info("{} ({}): {}".format(item_title, out_dict["units"],
                                      item_value))
        out_dict[item_title] = item_value

    # write catalog to HapDiagnostic-formatted .json file.
    diag_obj = du.HapDiagnostic(log_level=log_level)
    diag_obj.instantiate_from_hap_obj(
        hap_obj,
        data_source="{}.characterize_gaia_distribution".format(__taskname__),
        description=
        "A statistical characterization of the distribution of GAIA sources in image footprint"
    )
    diag_obj.add_data_item(out_dict,
                           "distribution characterization statistics")
    diag_obj.write_json_file(hap_obj.drizzle_filename[:-9] +
                             "_svm_gaia_distribution_characterization.json",
                             clobber=True)
Example #34
    def make_grismcat(self,
                      drizzle_image="",
                      catalog=None,
                      hard_angle=False,
                      hard_angle_value=90.):
        """Make the grism catalog.

        The method creates a new input object list. The positional
        information on objects in a drizzled image are projected
        back into the coordinate system of one input image.
        A selection is done on the basis of the projected coordinates,
        and the selected objects are stored to a new IOL file with an
        updated object angle that is in the projected coordinate system.

        Parameters
        ----------
        drizzle_image : str
            The name of the drizzled mosaic image that the catalog
            was constructed from.

        catalog : astropy.table.Table
            The master catalog which will be used to
            create the dither image catalog with updated angles

        hard_angle : bool
            If True, force the extraction angle to hard_angle_value
            (90 degrees by default).

        hard_angle_value : float
            If a specified angle for extraction is preferred then
            this is the value to use and will replace THETA_IMAGE
            in the output catalog. It's currently specified in
            degrees, and is converted appropriately for whatever
            units are used in the catalog itself.

        Returns
        -------
        Nothing

        Notes
        -----
        This method previously took the names of two text files that contained
        the displayed positions and angles calculated from the catalog entry. That
        functionality has been moved to memory since awtran is no longer in use.
        The catalog is read directly.
        """

        if not drizzle_image:
            raise aXeError("IOLPREP: No drizzle image specified.")

        if catalog is None:
            raise aXeError("IOLPREP: No input catalog provided.")
        if not isinstance(catalog, Table):
            raise aXeError(
                "IOLPREP: Expected input catalog to be an astropy table")

        _log.info("\n >>>> Working on Input Object List: {0:s} >>>>\n".format(
            self.iol_name))

        # now translate ra dec to new image pixel points
        # this must go through the wcs of the mosaic image
        # and then through the wcs for the individual image
        _log.info(
            "Converting coordinates using wcs from grism image {0}\n".format(
                self.filename))
        trad = catalog['THETA_IMAGE']
        xcat = catalog['X_IMAGE']
        ycat = catalog['Y_IMAGE']

        # translate to degrees if necessary
        if catalog['THETA_IMAGE'].unit.name == 'deg':
            translate = True
        elif catalog['THETA_IMAGE'].unit.name == 'rad':
            translate = False
            hard_angle_value = math.radians(hard_angle_value)
        else:
            raise aXeError("Unknown unit on THETA_IMAGE in input catalog")

        # 10.0 is a made up scaling length to use to
        # get the angle precision
        shifted_x = []
        shifted_y = []
        for t, x, y in zip(trad, xcat, ycat):
            if translate:
                angle = math.radians(t)
            else:
                angle = t  # already in radians

            shifted_x.append(x + 10.0 * math.cos(angle))
            shifted_y.append(y + 10.0 * math.sin(angle))

        # translate the catalog (x, y) to (ra, dec)
        mosaic_image_wcs = HSTWCS(drizzle_image, ext=1)
        mosaic_image_ra, mosaic_image_dec = mosaic_image_wcs.wcs_pix2world(
            shifted_x, shifted_y, 1)
        # compute  the location in the dithered image using the shifted coords
        dither_image_wcs = HSTWCS(self.filename)
        dither_image_x, dither_image_y = dither_image_wcs.all_world2pix(
            mosaic_image_ra, mosaic_image_dec, 1)
        trans_ra, trans_dec = mosaic_image_wcs.wcs_pix2world(xcat, ycat, 1)
        trans_x, trans_y = dither_image_wcs.all_world2pix(
            trans_ra, trans_dec, 1)

        output_catalog = deepcopy(catalog)
        for row in range(len(catalog) - 1, -1, -1):
            x = trans_x[row]
            y = trans_y[row]
            # check whether the object position is
            # in the range to be stored
            if ((self.dim_info[0] <= x <= self.dim_info[1])
                    and (self.dim_info[2] <= y <= self.dim_info[3])):

                # compute the new object angle
                dx = xcat[row] - x
                dy = ycat[row] - y
                angle = math.atan2(dy, dx)  # compute local angle change

                # return to degrees for catalog if necessary
                if translate:
                    angle = math.degrees(angle)

                # _log.info("dx {} dy {}  angle {} x,y: ({},{})\n".format(dx, dy, angle, x, y))
                # fill in the new position and angle
                output_catalog['X_IMAGE'][row] = trans_x[row]
                output_catalog['Y_IMAGE'][row] = trans_y[row]
                if hard_angle:
                    output_catalog['THETA_IMAGE'][row] = hard_angle_value
                else:
                    output_catalog['THETA_IMAGE'][row] = angle
            else:
                _log.info(
                    f"{x}\t{y}\t{self.dim_info}\t{output_catalog['NUMBER'][row]}"
                )
                output_catalog.remove_row(row)

        # save the new IOL, this is done especially for the C
        # code which is expecting Source Extractor style catalog
        # files. Decided to keep the output here consistent with
        # the C code so that it can continue to be used separately.
        # numbers start at 1 not zero. The output formatting allows
        # the astropy.sextractor formatter to read the catalog file.
        # There isn't currently an astropy writer for that format.
        if os.access(self.iol_name, os.F_OK):
            os.remove(self.iol_name)
        of = open(self.iol_name, 'w')
        for num, name in enumerate(output_catalog.colnames, start=1):
            of.write("# {0:d} {1:s}\t\t{2:s}\t\t[{3:s}]\n".format(
                num, name, output_catalog[name].description,
                str(output_catalog[name].unit)))
        output_catalog.write(of, format='ascii.no_header', overwrite=False)
        of.close()

        _log.info(
            f"\n >>>> Catalog: {self.iol_name} written with {len(catalog)} entries.>>>> \n"
        )
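
The angle update in make_grismcat rests on a simple trick: step a short distance (10 pixels here) along the position angle in the mosaic frame, map both the original and the shifted point through the mosaic and dither WCSs, and recompute the angle from the resulting displacement. A minimal sketch of that idea in isolation, not the exact arithmetic of the method above; the helper name is hypothetical and the two WCS arguments are HSTWCS objects as used throughout:

import math

def reproject_angle(x, y, theta_deg, mosaic_wcs, dither_wcs, probe=10.0):
    # point shifted along the position angle in the mosaic pixel frame
    xs = x + probe * math.cos(math.radians(theta_deg))
    ys = y + probe * math.sin(math.radians(theta_deg))
    # map original and shifted points: mosaic pixels -> sky -> dither pixels
    ra, dec = mosaic_wcs.wcs_pix2world([x, xs], [y, ys], 1)
    px, py = dither_wcs.all_world2pix(ra, dec, 1)
    # recompute the position angle from the displacement in the dither frame
    return math.degrees(math.atan2(py[1] - py[0], px[1] - px[0]))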