Code Example #1
def main(msin, config_path):
    o = options(config_path, option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')

    # fix up the new list-type options
    for i, cat in enumerate(o['list']):
        try:
            o[cat] = o['filenames'][i]
        except:
            pass
        try:
            o[cat + '_matchrad'] = o['radii'][i]
        except:
            pass
        try:
            o[cat + '_fluxfactor'] = o['fluxfactor'][i]
        except:
            pass

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # pybdsm source finding
    sfind_image(o['catprefix'], o['pbimage'], o['sfind_pixel_fraction'])

    # matching with catalogs
    for cat in o['list']:
        print 'Doing catalogue', cat
        crossmatch_image(o['catprefix'] + '.cat.fits', cat)
        filter_catalog(o['catprefix'] + '.cat.fits',
                       o['catprefix'] + '.cat.fits_' + cat + '_match.fits',
                       o['pbimage'],
                       o['catprefix'] + '.cat.fits_' + cat +
                       '_match_filtered.fits',
                       cat,
                       options=o)

    # Filter catalogs (only keep isolated compact sources within 3deg of pointing centre)

    # Astrometric plots
    if 'FIRST' in o['list']:
        report('Plotting position offsets')
        plot_position_offset(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_positions.png' % o['catprefix'],
            'FIRST',
            options=o)

        t = Table.read(o['catprefix'] + '.cat.fits_FIRST_match_filtered.fits')
        bsra = np.percentile(bootstrap(t['FIRST_dRA'], np.mean, 10000),
                             (16, 84))
        bsdec = np.percentile(bootstrap(t['FIRST_dDEC'], np.mean, 10000),
                              (16, 84))
        mdra = np.mean(t['FIRST_dRA'])
        mddec = np.mean(t['FIRST_dDEC'])
        print 'Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mdra, bsra[0], bsra[1])
        print 'Mean delta DEC is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mddec, bsdec[0], bsdec[1])

        report('Plotting flux ratios')
        # Flux ratio plots (only compact sources)
        plot_flux_ratios(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_fluxerrors.png' % o['catprefix'],
            options=o)

    report('Plotting flux scale comparison')
    # Flux scale comparison plots
    if 'TGSS' in o['list']:
        plot_flux_errors(
            '%s.cat.fits_TGSS_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_TGSS_match_filtered_fluxratio.png' % o['catprefix'],
            'TGSS',
            options=o)
        t = Table.read(o['catprefix'] + '.cat.fits_TGSS_match_filtered.fits')
        ratios = t['Total_flux'] / (t['TGSS_Total_flux'] /
                                    o['TGSS_fluxfactor'])
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/TGSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
    if 'NVSS' in o['list']:
        t = Table.read(o['catprefix'] + '.cat.fits_NVSS_match_filtered.fits')
        t = t[t['Total_flux'] > 10e-3]
        ratios = t['Total_flux'] / t['NVSS_Total_flux']
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/NVSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
    # Noise estimate
    hdu = fits.open(o['pbimage'])

    imagenoise = get_rms(hdu)
    print 'An estimate of the image noise is %.3f muJy/beam' % (imagenoise *
                                                                1E6)
    return 0
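
The bootstrap() helper used above (and in the later examples) to derive the quoted 16th/84th-percentile ranges is imported from elsewhere in the pipeline and is not reproduced on this page. A minimal sketch of such a resampling routine, assuming the bootstrap(data, statistic, nsamples) call signature seen above (the real implementation may differ):

import numpy as np

def bootstrap(data, statistic=np.mean, nsamples=10000):
    # Resample `data` with replacement `nsamples` times and apply `statistic`
    # to each resample; percentiles of the result give the quoted 1-sigma range.
    data = np.asarray(data)
    idx = np.random.randint(0, len(data), size=(nsamples, len(data)))
    return np.array([statistic(data[row]) for row in idx])

# e.g. np.percentile(bootstrap(t['FIRST_dRA'], np.mean, 10000), (16, 84))
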
Code Example #2
        tgss_scale = np.median(ratios)
    else:
        tgss_scale = None
    if 'NVSS' in o['list']:
        t = Table.read(o['catprefix'] + '.cat.fits_NVSS_match_filtered.fits')
        t = t[t['Total_flux'] > 10e-3]
        ratios = t['Total_flux'] / t['NVSS_Total_flux']
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/NVSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
        nvss_scale = np.median(ratios)
    else:
        nvss_scale = None
    # Noise estimate
    hdu = fits.open(o['pbimage'])
    imagenoise = get_rms(hdu)
    rms = imagenoise * 1e6
    print 'An estimate of the image noise is %.3f muJy/beam' % rms
    drs = do_dr_checker(o['catprefix'] + '.cat.fits',
                        o['pbimage'],
                        verbose=False,
                        peak=0.4)
    dr = np.median(drs)
    print 'Median dynamic range is', dr

    # fit source counts
    if o['fit_sourcecounts']:
        from fit_sourcecounts import do_fit_sourcecounts
        sc_norm, sc_index, scale = do_fit_sourcecounts(rms=imagenoise)
    else:
        sc_norm = sc_index = scale = None
Code Example #3
        print 'Median LOFAR/TGSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (np.median(ratios),bsratio[0],bsratio[1])
        tgss_scale=np.median(ratios)
    else:
        tgss_scale=None
    if 'NVSS' in o['list']:
        t=Table.read(o['catprefix']+'.cat.fits_NVSS_match_filtered.fits')
        t=t[t['Total_flux']>30e-3]
        ratios=t['Total_flux']/t['NVSS_Total_flux']
        bsratio=np.percentile(bootstrap(ratios,np.median,10000),(16,84))
        print 'Median LOFAR/NVSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (np.median(ratios),bsratio[0],bsratio[1])
        nvss_scale=np.median(ratios)
    else:
        nvss_scale=None
    # Noise estimate
    hdu=fits.open(o['pbimage'])
    imagenoise = get_rms(hdu)
    rms=imagenoise*1e6
    print 'An estimate of the image noise is %.3f muJy/beam' % rms
    drs=do_dr_checker(o['catprefix']+'.cat.fits',o['pbimage'],verbose=False,peak=0.4)
    dr=np.median(drs)
    print 'Median dynamic range is',dr

    # fit source counts
    if o['fit_sourcecounts']:
        from fit_sourcecounts import do_fit_sourcecounts
        sc_norm,sc_index,scale=do_fit_sourcecounts(rms=imagenoise)
    else:
        sc_norm=sc_index=scale=None
    
    print rms,dr,catsources,first_ra,first_dec,tgss_scale,nvss_scale,sc_norm,sc_index,scale
Code Example #4
#!/usr/bin/python

# find the central rms of an image by iteratively removing the outliers

from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from astropy.io import fits
from auxcodes import get_rms

if __name__ == '__main__':
    import sys
    for name in sys.argv[1:]:
        hdu = fits.open(name)
        print(name, get_rms(hdu))
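
Every example on this page ultimately relies on get_rms() from auxcodes, which is not shown here. As a rough sketch only, an iterative outlier-clipping noise estimate in the same spirit could look like the following (the pipeline's own version differs in details such as selecting a central box and its clipping threshold):

import numpy as np

def clipped_rms(data, nsigma=3.0, maxiter=20, tol=1e-3):
    # Iteratively discard pixels more than nsigma*rms from the mean until the
    # rms estimate stops changing; non-finite pixels are ignored.
    d = np.asarray(data, dtype=float)
    d = d[np.isfinite(d)]
    rms = d.std()
    for _ in range(maxiter):
        d = d[np.abs(d - d.mean()) < nsigma * rms]
        newrms = d.std()
        if abs(newrms - rms) < tol * rms:
            break
        rms = newrms
    return rms

The cent-rms.py script above is then run as, for example, "python cent-rms.py image1.fits image2.fits", printing one noise estimate per file.
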
Code Example #5
File: mosaic.py  Project: mhardcastle/ddf-pipeline
def make_mosaic(args):
    if args.scale is not None:
        if len(args.scale) != len(args.directories):
            die('Scales provided must match directories')

    if args.noise is not None:
        if len(args.noise) != len(args.directories):
            die('Noises provided must match directories')

    if args.rootname:
        rootname=args.rootname+'-'
    else:
        rootname=''

    if args.exact:
        reproj=reproject_exact
    else:
        reproj=reproject_interp_chunk_2d

    if args.do_lowres:
        intname='image_full_low_m.int.restored.fits'
        appname='image_full_low_m.app.restored.fits'
    elif args.use_shifted:
        intname='image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'
        appname='image_full_ampphase_di_m.NS_shift.app.facetRestored.fits'
    else:
        intname='image_full_ampphase_di_m.NS.int.restored.fits'
        appname='image_full_ampphase_di_m.NS.app.restored.fits'

    # astromap blanking if required
    bth=None
    try:
        bth=float(args.astromap_blank)
    except:
        pass

    threshold=float(args.beamcut)
    hdus=[]
    app=[]
    astromaps=[]
    wcs=[]
    print 'Reading files...'
    noise=[]
    name=[]
    for d in args.directories:
        name.append(d.split('/')[-1])
        hdu=fits.open(d+'/'+intname)
        if args.find_noise:
            print 'Estimating noise for', d+'/'+intname
            if args.do_lowres:
                noise.append(get_rms(hdu,boxsize=1500))
            else:
                noise.append(get_rms(hdu))
        hdus.append(flatten(hdu))
        app.append(flatten(fits.open(d+'/'+appname)))
        if bth:
            astromaps.append(flatten(fits.open(d+'/astromap.fits')))

    if args.find_noise:
        args.noise=noise
        print 'Noise values are:'
        for t,n in zip(name,noise):
            print t,n

    print 'Computing noise/beam factors...'
    for i in range(len(app)):
        np.seterr(divide='ignore')
        app[i].data=np.divide(app[i].data,hdus[i].data)
        app[i].data[app[i].data<threshold]=0
        # at this point this is the beam factor: we want 1/sigma**2.0, so divide by central noise and square
        if args.noise is not None:
            if args.scale is not None:
                app[i].data/=args.noise[i]*args.scale[i]
            else:
                app[i].data/=args.noise[i]

        app[i].data=app[i].data**2.0

        if args.scale is not None:
            hdus[i].data*=args.scale[i]

    if args.shift:
        print 'Finding shifts (NOTE THIS CODE IS OBSOLETE)...'
        # shift according to the FIRST delta ra/dec from quality pipeline
        dras=[]
        ddecs=[]
        for d in args.directories:
            t=Table.read(d+'/image_full_ampphase1m.cat.fits_FIRST_match_filtered.fits')
            dras.append(np.mean(t['FIRST_dRA']))
            ddecs.append(np.mean(t['FIRST_dDEC']))
        print 'Applying shifts:',dras,ddecs
        for i in range(len(app)):
            for hdu in [hdus[i],app[i]]:
                ra=hdu.header['CRVAL1']
                dec=hdu.header['CRVAL2']
                hdu.header['CRVAL1']-=dras[i]/(3600.0*np.cos(np.pi*dec/180.0))
                hdu.header['CRVAL2']-=ddecs[i]/3600.0

    for i in range(len(app)):
        wcs.append(WCS(hdus[i].header))

    # astromap blanking
    if bth:
        print 'Blanking using astrometry quality maps with threshold',bth,'arcsec'
        for i in range(len(app)):
            outname=rootname+'astroblank-'+name[i]+'.fits'
            if args.load and os.path.isfile(outname):
                print 'Loading previously blanked image'
                hdu=fits.open(outname)
                hdus[i].data=hdu[0].data
            else:
                print 'Blanking image',i
                dmaxy,dmaxx=hdus[i].data.shape
                count=0
                am=astromaps[i]
                awcs=WCS(am.header)
                maxy,maxx=am.data.shape
                for y in range(maxy):
                    for x in range(maxx):
                        value=am.data[y,x]
                        if np.isnan(value):
                            if y<maxy-1:
                                value=am.data[y+1,x]
                        if value>bth:
                            ra,dec=[float(f) for f in awcs.wcs_pix2world(x,y,0)]
                            rx,ry=[int(p) for p in wcs[i].wcs_world2pix(ra,dec,0)]
                            rxp=rx+21 # astromap pix size, with margin
                            ryp=ry+21
                            if rx<0: rx=0
                            if ry<0: ry=0
                            if rxp>dmaxx: rxp=dmaxx
                            if ryp>dmaxy: ryp=dmaxy
                            hdus[i].data[ry:ryp,rx:rxp]=np.nan
                            count+=1
                print '... blanked',count*900.0/3600,'square arcmin'
                outname=rootname+'astroblank-'+name[i]+'.fits'
                if args.save: hdus[i].writeto(outname,clobber=True)
            app[i].data[np.isnan(hdus[i].data)]=np.nan

    # If the header is directly passed in, use it
    try:
        header=args.header
        xsize=header['NAXIS1']
        ysize=header['NAXIS2']
        print 'Mosaic using header passed from calling program'
    except:
        header=None
    if header is None:
        if args.load_layout:
            with open(rootname+'mosaic-header.pickle') as f:
                header=pickle.load(f)
            xsize=header['NAXIS1']
            ysize=header['NAXIS2']
            print 'Mosaic using loaded header'
        else:
            print 'Creating the mosaic header'
            ras=np.array([w.wcs.crval[0] for w in wcs])
            decs=np.array([w.wcs.crval[1] for w in wcs])

            mra=np.mean(ras)
            mdec=np.mean(decs)
            print 'Will make mosaic at',mra,mdec

            # we make a reference WCS and use it to find the extent in pixels
            # needed for the combined image

            rwcs=WCS(naxis=2)
            rwcs.wcs.ctype=wcs[0].wcs.ctype
            rwcs.wcs.cdelt=wcs[0].wcs.cdelt
            rwcs.wcs.crval=[mra,mdec]
            rwcs.wcs.crpix=[1,1]

            xmin=0
            xmax=0
            ymin=0
            ymax=0
            for a,w in zip(app,wcs):
                ys,xs=np.where(a.data)
                axmin=xs.min()
                aymin=ys.min()
                axmax=xs.max()
                aymax=ys.max()
                del(xs)
                del(ys)
                print 'non-zero',axmin,aymin,axmax,aymax
                for x,y in ((axmin,aymin),(axmax,aymin),(axmin,aymax),(axmax,aymax)):
                    ra,dec=[float(f) for f in w.wcs_pix2world(x,y,0)]
                    #print ra,dec
                    nx,ny=[float (f) for f in rwcs.wcs_world2pix(ra,dec,0)]
                    print nx,ny
                    if nx<xmin: xmin=nx
                    if nx>xmax: xmax=nx
                    if ny<ymin: ymin=ny
                    if ny>ymax: ymax=ny

            print 'co-ord range:', xmin, xmax, ymin, ymax

            xsize=int(xmax-xmin)
            ysize=int(ymax-ymin)

            rwcs.wcs.crpix=[-int(xmin)+1,-int(ymin)+1]
            print 'checking:', rwcs.wcs_world2pix(mra,mdec,0)
            print rwcs

            header=rwcs.to_header()
            header['NAXIS']=2
            header['NAXIS1']=xsize
            header['NAXIS2']=ysize

            with open(rootname+'mosaic-header.pickle','w') as f:
                pickle.dump(header,f)

    isum=np.zeros([ysize,xsize])
    wsum=np.zeros_like(isum)
    mask=np.zeros_like(isum,dtype=np.bool)
    print 'now making the mosaic'
    for i in range(len(hdus)):
        print 'image',i,'(',name[i],')'
        outname=rootname+'reproject-'+name[i]+'.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu=fits.open(outname)
            r=hdu[0].data
        else:
            print 'reprojecting...'
            r, footprint = reproj(hdus[i], header, hdu_in=0, parallel=False)
            r[np.isnan(r)]=0
            hdu = fits.PrimaryHDU(header=header,data=r)
            if args.save: hdu.writeto(outname,clobber=True)
        print 'weights',i,'(',name[i],')'
        outname=rootname+'weight-'+name[i]+'.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu=fits.open(outname)
            w=hdu[0].data
            mask|=(w>0)
        else:
            print 'reprojecting...'
            w, footprint = reproj(app[i], header, hdu_in=0, parallel=False)
            mask|=~np.isnan(w)
            w[np.isnan(w)]=0
            hdu = fits.PrimaryHDU(header=header,data=w)
            if args.save: hdu.writeto(outname,clobber=True)
        print 'add to mosaic...'
        isum+=r*w
        wsum+=w

    if not(args.no_write):
        isum/=wsum
        # mask now contains True where a non-nan region was present in either map
        isum[~mask]=np.nan
        for ch in ('BMAJ', 'BMIN', 'BPA'):
            header[ch]=hdus[0].header[ch]
        header['ORIGIN']='ddf-pipeline '+version()

        hdu = fits.PrimaryHDU(header=header,data=isum)
        hdu.writeto(rootname+'mosaic.fits',clobber=True)

        hdu = fits.PrimaryHDU(header=header,data=wsum)
        hdu.writeto(rootname+'mosaic-weights.fits',clobber=True)
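
The weighting used by make_mosaic() above is inverse-variance: every pixel of each input image is weighted by (primary-beam response / central noise)**2, and the mosaic is the weighted mean isum/wsum. A toy illustration of that combination step in isolation, with made-up numbers:

import numpy as np

imgs   = [np.full((2, 2), 1.0), np.full((2, 2), 3.0)]   # intrinsic-flux images
beams  = [np.full((2, 2), 1.0), np.full((2, 2), 0.5)]   # apparent/intrinsic ratio
noises = [1.0, 2.0]                                      # central rms per image

weights = [(b / n) ** 2 for b, n in zip(beams, noises)]  # 1/sigma**2 per pixel
mosaic = sum(i * w for i, w in zip(imgs, weights)) / sum(weights)
print(mosaic)  # values are pulled towards the lower-noise first image
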
Code Example #6
def do_fit_sourcecounts(t=None, rms=None,do_plots=False,sfindarea=17.09):

    global fluxnorm,ds
    if t is None:
        t=Table.read('image_full_ampphase_di_m.NS.cat.fits')
    print 'Number of sources in full table is',len(t)
    if rms is None:
        rms=get_rms(fits.open('image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'))

    cutoff=rms*20
    print 'Cutoff will be',cutoff,'Jy'
    #cutoff=1e-3
    maxflux=np.max(t['Total_flux'])
    t=t[t['Total_flux']>cutoff]
    print 'Number of sources after completeness cut is',len(t)

    bins=np.logspace(np.log10(cutoff),np.log10(maxflux)*1.01,20)
    cbins=0.5*(bins[:-1]+bins[1:])
    ds=bins[1:]-bins[:-1]

    hist,_=np.histogram(t['Total_flux'],bins=bins)

    zval=None
    for i in range(len(hist)):
        if hist[i]==0:
            zval=i
            break

    if zval is not None:
        cbins=cbins[:zval]
        hist=hist[:zval]
        ds=ds[:zval]

    nwalkers=10
    ndim=2
    fluxnorm=0.1 # find normalization at this flux in Jy

    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost,
                                    args=(cbins,hist))

    pos=[[1.6,0.86]+0.01*np.random.normal(size=ndim)
         for i in range(nwalkers)]

    sampler.run_mcmc(pos, 1000)
    samples=sampler.chain[:, 200:, :].reshape((-1, ndim))

    samplest=samples.transpose()

    means=np.mean(samplest,axis=1)
    errors=np.percentile(samplest,(16,84),axis=1)-means

    for i in range(ndim):
        print i,means[i],errors[0,i],errors[1,i]

    fnorm=means[0]
    falpha=means[1]

    C=[3.5142,0.3738,-0.3138,-0.0717,0.0213,0.0097] # Intema+

    fn=fluxnorm
    totfactor=1.0
    for i in range(10):
        lf=np.log10(fn)
        ncn=0
        for i in range(6):
            ncn+=C[i]*lf**i
        print 'for %.3f Jy number count norm should be %f' % (fn,10**ncn)
        measured_ncn=10**fnorm*fluxnorm*(fn**1.5)*3282.8/sfindarea # check precise area
        print 'measured number count norm is',measured_ncn 
        scale=(measured_ncn/10**ncn)#**(1.0/1.5)
        print 'scaling factor should be',scale
        totfactor*=scale
        print 'total factor is',totfactor
        fn=fluxnorm/totfactor
        print 'New flux norm value is',fn
        if abs(scale-1.0)<1e-4:
            print 'Converged, stopping'
            break

    if do_plots:
        import matplotlib.pyplot as plt
        import corner
        
        plt.scatter(cbins,hist)
        plt.xscale('log')
        plt.yscale('log')
        plt.xlim(cutoff,maxflux)
        plt.ylim(0.5,np.max(hist)*1.3)

        yv=model(cbins,fnorm,falpha)
        plt.plot(cbins,yv)

        fig = corner.corner(samples)
        plt.show()

    return fnorm,falpha,totfactor
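
The model() and lnpost() functions sampled by emcee above are defined elsewhere in fit_sourcecounts and are not shown here. One plausible reading, assuming a single power law for the differential counts normalized at fluxnorm and a Poisson likelihood per flux bin (the actual pipeline functions may differ), is:

import numpy as np
from scipy.special import gammaln

def model(s, norm, alpha):
    # Predicted number of sources per bin: power-law differential counts,
    # normalized at the global fluxnorm, times the bin widths ds (both set as
    # globals in do_fit_sourcecounts above).
    return 10**norm * (s / fluxnorm)**(-alpha) * ds

def lnpost(theta, s, counts):
    norm, alpha = theta
    if not (0.0 < norm < 5.0 and 0.0 < alpha < 3.0):  # assumed flat priors
        return -np.inf
    mu = model(s, norm, alpha)
    # Poisson log-likelihood of the observed histogram, up to a constant
    return np.sum(counts * np.log(mu) - mu - gammaln(counts + 1))
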
Code Example #7
File: mosaic.py  Project: rvweeren/ddf-pipeline
def make_mosaic(args):
    if args.scale is not None:
        if len(args.scale) != len(args.directories):
            die('Scales provided must match directories')

    if args.noise is not None:
        if len(args.noise) != len(args.directories):
            die('Noises provided must match directories')

    if args.rootname:
        rootname = args.rootname + '-'
    else:
        rootname = ''

    if args.exact:
        reproj = reproject_exact
    else:
        reproj = reproject_interp_chunk_2d

    if args.do_lowres:
        intname = 'image_full_low_m.int.restored.fits'
        appname = 'image_full_low_m.app.restored.fits'
    elif args.use_shifted:
        intname = 'image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'
        appname = 'image_full_ampphase_di_m.NS_shift.app.facetRestored.fits'
    else:
        intname = 'image_full_ampphase_di_m.NS.int.restored.fits'
        appname = 'image_full_ampphase_di_m.NS.app.restored.fits'

    # astromap blanking if required
    bth = None
    try:
        bth = float(args.astromap_blank)
    except:
        pass

    threshold = float(args.beamcut)
    hdus = []
    app = []
    astromaps = []
    wcs = []
    print 'Reading files...'
    noise = []
    name = []
    for d in args.directories:
        name.append(d.split('/')[-1])
        hdu = fits.open(d + '/' + intname)
        if args.find_noise:
            print 'Estimating noise for', d + '/' + intname
            if args.do_lowres:
                noise.append(get_rms(hdu, boxsize=1500))
            else:
                noise.append(get_rms(hdu))
        hdus.append(flatten(hdu))
        app.append(flatten(fits.open(d + '/' + appname)))
        if bth:
            astromaps.append(flatten(fits.open(d + '/astromap.fits')))

    if args.find_noise:
        args.noise = noise
        print 'Noise values are:'
        for t, n in zip(name, noise):
            print t, n

    print 'Computing noise/beam factors...'
    for i in range(len(app)):
        np.seterr(divide='ignore')
        app[i].data = np.divide(app[i].data, hdus[i].data)
        app[i].data[app[i].data < threshold] = 0
        # at this point this is the beam factor: we want 1/sigma**2.0, so divide by central noise and square
        if args.noise is not None:
            if args.scale is not None:
                app[i].data /= args.noise[i] * args.scale[i]
            else:
                app[i].data /= args.noise[i]

        app[i].data = app[i].data**2.0

        if args.scale is not None:
            hdus[i].data *= args.scale[i]

    if args.shift:
        print 'Finding shifts (NOTE THIS CODE IS OBSOLETE)...'
        # shift according to the FIRST delta ra/dec from quality pipeline
        dras = []
        ddecs = []
        for d in args.directories:
            t = Table.read(
                d +
                '/image_full_ampphase1m.cat.fits_FIRST_match_filtered.fits')
            dras.append(np.mean(t['FIRST_dRA']))
            ddecs.append(np.mean(t['FIRST_dDEC']))
        print 'Applying shifts:', dras, ddecs
        for i in range(len(app)):
            for hdu in [hdus[i], app[i]]:
                ra = hdu.header['CRVAL1']
                dec = hdu.header['CRVAL2']
                hdu.header['CRVAL1'] -= dras[i] / (3600.0 *
                                                   np.cos(np.pi * dec / 180.0))
                hdu.header['CRVAL2'] -= ddecs[i] / 3600.0

    for i in range(len(app)):
        wcs.append(WCS(hdus[i].header))

    # astromap blanking
    if bth:
        print 'Blanking using astrometry quality maps with threshold', bth, 'arcsec'
        for i in range(len(app)):
            outname = rootname + 'astroblank-' + name[i] + '.fits'
            if args.load and os.path.isfile(outname):
                print 'Loading previously blanked image'
                hdu = fits.open(outname)
                hdus[i].data = hdu[0].data
            else:
                print 'Blanking image', i
                dmaxy, dmaxx = hdus[i].data.shape
                count = 0
                am = astromaps[i]
                awcs = WCS(am.header)
                maxy, maxx = am.data.shape
                for y in range(maxy):
                    for x in range(maxx):
                        value = am.data[y, x]
                        if np.isnan(value):
                            if y < maxy - 1:
                                value = am.data[y + 1, x]
                        if value > bth:
                            ra, dec = [
                                float(f) for f in awcs.wcs_pix2world(x, y, 0)
                            ]
                            rx, ry = [
                                int(p)
                                for p in wcs[i].wcs_world2pix(ra, dec, 0)
                            ]
                            rxp = rx + 21  # astromap pix size, with margin
                            ryp = ry + 21
                            if rx < 0: rx = 0
                            if ry < 0: ry = 0
                            if rxp > dmaxx: rxp = dmaxx
                            if ryp > dmaxy: ryp = dmaxy
                            hdus[i].data[ry:ryp, rx:rxp] = np.nan
                            count += 1
                print '... blanked', count * 900.0 / 3600, 'square arcmin'
                outname = rootname + 'astroblank-' + name[i] + '.fits'
                if args.save: hdus[i].writeto(outname, clobber=True)
            app[i].data[np.isnan(hdus[i].data)] = np.nan

    # If the header is directly passed in, use it
    try:
        header = args.header
        xsize = header['NAXIS1']
        ysize = header['NAXIS2']
        print 'Mosaic using header passed from calling program'
    except:
        header = None
    if header is None:
        if args.load_layout:
            with open(rootname + 'mosaic-header.pickle') as f:
                header = pickle.load(f)
            xsize = header['NAXIS1']
            ysize = header['NAXIS2']
            print 'Mosaic using loaded header'
        else:
            print 'Creating the mosaic header'
            ras = np.array([w.wcs.crval[0] for w in wcs])
            decs = np.array([w.wcs.crval[1] for w in wcs])

            mra = np.mean(ras)
            mdec = np.mean(decs)
            print 'Will make mosaic at', mra, mdec

            # we make a reference WCS and use it to find the extent in pixels
            # needed for the combined image

            rwcs = WCS(naxis=2)
            rwcs.wcs.ctype = wcs[0].wcs.ctype
            rwcs.wcs.cdelt = wcs[0].wcs.cdelt
            rwcs.wcs.crval = [mra, mdec]
            rwcs.wcs.crpix = [1, 1]

            xmin = 0
            xmax = 0
            ymin = 0
            ymax = 0
            for a, w in zip(app, wcs):
                ys, xs = np.where(a.data)
                axmin = xs.min()
                aymin = ys.min()
                axmax = xs.max()
                aymax = ys.max()
                del (xs)
                del (ys)
                print 'non-zero', axmin, aymin, axmax, aymax
                for x, y in ((axmin, aymin), (axmax, aymin), (axmin, aymax),
                             (axmax, aymax)):
                    ra, dec = [float(f) for f in w.wcs_pix2world(x, y, 0)]
                    #print ra,dec
                    nx, ny = [float(f) for f in rwcs.wcs_world2pix(ra, dec, 0)]
                    print nx, ny
                    if nx < xmin: xmin = nx
                    if nx > xmax: xmax = nx
                    if ny < ymin: ymin = ny
                    if ny > ymax: ymax = ny

            print 'co-ord range:', xmin, xmax, ymin, ymax

            xsize = int(xmax - xmin)
            ysize = int(ymax - ymin)

            rwcs.wcs.crpix = [-int(xmin) + 1, -int(ymin) + 1]
            print 'checking:', rwcs.wcs_world2pix(mra, mdec, 0)
            print rwcs

            header = rwcs.to_header()
            header['NAXIS'] = 2
            header['NAXIS1'] = xsize
            header['NAXIS2'] = ysize

            with open(rootname + 'mosaic-header.pickle', 'w') as f:
                pickle.dump(header, f)

    isum = np.zeros([ysize, xsize])
    wsum = np.zeros_like(isum)
    mask = np.zeros_like(isum, dtype=np.bool)
    print 'now making the mosaic'
    for i in range(len(hdus)):
        print 'image', i, '(', name[i], ')'
        outname = rootname + 'reproject-' + name[i] + '.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu = fits.open(outname)
            r = hdu[0].data
        else:
            print 'reprojecting...'
            r, footprint = reproj(hdus[i], header, hdu_in=0, parallel=False)
            r[np.isnan(r)] = 0
            hdu = fits.PrimaryHDU(header=header, data=r)
            if args.save: hdu.writeto(outname, clobber=True)
        print 'weights', i, '(', name[i], ')'
        outname = rootname + 'weight-' + name[i] + '.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu = fits.open(outname)
            w = hdu[0].data
            mask |= (w > 0)
        else:
            print 'reprojecting...'
            w, footprint = reproj(app[i], header, hdu_in=0, parallel=False)
            mask |= ~np.isnan(w)
            w[np.isnan(w)] = 0
            hdu = fits.PrimaryHDU(header=header, data=w)
            if args.save: hdu.writeto(outname, clobber=True)
        print 'add to mosaic...'
        isum += r * w
        wsum += w

    if not (args.no_write):
        isum /= wsum
        # mask now contains True where a non-nan region was present in either map
        isum[~mask] = np.nan
        for ch in ('BMAJ', 'BMIN', 'BPA'):
            header[ch] = hdus[0].header[ch]
        header['ORIGIN'] = 'ddf-pipeline ' + version()

        hdu = fits.PrimaryHDU(header=header, data=isum)
        hdu.writeto(rootname + 'mosaic.fits', clobber=True)

        hdu = fits.PrimaryHDU(header=header, data=wsum)
        hdu.writeto(rootname + 'mosaic-weights.fits', clobber=True)
Code Example #8
File: cent-rms.py  Project: mhardcastle/ddf-pipeline
#!/usr/bin/python

# find the central rms of an image by iteratively removing the outliers

import numpy as np
from astropy.io import fits
from auxcodes import get_rms

if __name__=='__main__':
    import sys
    for name in sys.argv[1:]:
        hdu=fits.open(name)
        print name,get_rms(hdu)
Code Example #9
def do_fit_sourcecounts(t=None, rms=None, do_plots=False, sfindarea=17.09):

    global fluxnorm, ds
    if t is None:
        t = Table.read('image_full_ampphase_di_m.NS.cat.fits')
    print('Number of sources in full table is', len(t))
    if rms is None:
        rms = get_rms(
            fits.open(
                'image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'))

    cutoff = rms * 20
    print('Cutoff will be', cutoff, 'Jy')
    #cutoff=1e-3
    maxflux = np.max(t['Total_flux'])
    t = t[t['Total_flux'] > cutoff]
    print('Number of sources after completeness cut is', len(t))

    bins = np.logspace(np.log10(cutoff), np.log10(maxflux) * 1.01, 20)
    cbins = 0.5 * (bins[:-1] + bins[1:])
    ds = bins[1:] - bins[:-1]

    hist, _ = np.histogram(t['Total_flux'], bins=bins)

    zval = None
    for i in range(len(hist)):
        if hist[i] == 0:
            zval = i
            break

    if zval is not None:
        cbins = cbins[:zval]
        hist = hist[:zval]
        ds = ds[:zval]

    nwalkers = 10
    ndim = 2
    fluxnorm = 0.1  # find normalization at this flux in Jy

    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, args=(cbins, hist))

    pos = [[1.6, 0.86] + 0.01 * np.random.normal(size=ndim)
           for i in range(nwalkers)]

    sampler.run_mcmc(pos, 1000)
    samples = sampler.chain[:, 200:, :].reshape((-1, ndim))

    samplest = samples.transpose()

    means = np.mean(samplest, axis=1)
    errors = np.percentile(samplest, (16, 84), axis=1) - means

    for i in range(ndim):
        print(i, means[i], errors[0, i], errors[1, i])

    fnorm = means[0]
    falpha = means[1]

    C = [3.5142, 0.3738, -0.3138, -0.0717, 0.0213, 0.0097]  # Intema+

    fn = fluxnorm
    totfactor = 1.0
    for i in range(10):
        lf = np.log10(fn)
        ncn = 0
        for i in range(6):
            ncn += C[i] * lf**i
        print('for %.3f Jy number count norm should be %f' % (fn, 10**ncn))
        measured_ncn = old_div(10**fnorm * fluxnorm * (fn**1.5) * 3282.8,
                               sfindarea)  # check precise area
        print('measured number count norm is', measured_ncn)
        scale = (old_div(measured_ncn, 10**ncn))  #**(1.0/1.5)
        print('scaling factor should be', scale)
        totfactor *= scale
        print('total factor is', totfactor)
        fn = old_div(fluxnorm, totfactor)
        print('New flux norm value is', fn)
        if abs(scale - 1.0) < 1e-4:
            print('Converged, stopping')
            break

    if do_plots:
        import matplotlib.pyplot as plt
        import corner

        plt.scatter(cbins, hist)
        plt.xscale('log')
        plt.yscale('log')
        plt.xlim(cutoff, maxflux)
        plt.ylim(0.5, np.max(hist) * 1.3)

        yv = model(cbins, fnorm, falpha)
        plt.plot(cbins, yv)

        fig = corner.corner(samples)
        plt.show()

    return fnorm, falpha, totfactor
Code Example #10
#!/usr/bin/python

# find the central rms of an image by iteratively removing the outliers

import numpy as np
from astropy.io import fits
from auxcodes import get_rms

if __name__ == '__main__':
    import sys
    for name in sys.argv[1:]:
        hdu = fits.open(name)
        print name, get_rms(hdu)
Code Example #11
def make_extended_mask(infile,
                       fullresfile,
                       rmsthresh=3.0,
                       sizethresh=2500,
                       maxsize=25000,
                       rootname=None,
                       verbose=False,
                       rmsfacet=False,
                       ds9region='image_dirin_SSD.tessel.reg'):
    ''' infile is the input low-res image, fullresfile is the full-resolution template image, sizethresh the minimum island size in pixels '''

    if rootname is None:
        prefix = ''
    else:
        prefix = rootname + '-'

    hdu = fits.open(infile)
    if rmsfacet == False:
        rms = get_rms(hdu)
    if rmsfacet == True:
        get_rms_map2(infile, ds9region, prefix + 'rms-low.fits')
        hdu2 = fits.open(prefix + 'rms-low.fits')
        rms = hdu2[0].data[0, 0, :]

    det = hdu[0].data[0, 0, :] > rmsthresh * rms
    labels, count = nd.label(det)

    print 'found', count, 'islands'
    #label, counts = np.unique(labels, return_counts=True)
    label = np.unique(labels)
    counts = np.bincount(labels.flatten())

    big = (counts > sizethresh) & (counts < maxsize)
    big_regions = label[big]

    print 'Found', len(big_regions) - 1, 'large islands'
    if verbose: print counts[big]

    mask = np.zeros_like(det, dtype=int)
    for l in big_regions:
        if l: mask += l * (labels == l)

    slices = nd.find_objects(mask)
    big_slices = [slices[i - 1] for i in big_regions if i]
    kernel = np.ones((3, 3))
    mask = convolve2d(mask, kernel, mode='same', fillvalue=0)
    mask = (mask > 1)
    w = WCS(hdu[0].header)
    hdu[0].data = mask.astype(np.float32)
    hdu.writeto(prefix + 'mask-low.fits', clobber=True)

    if fullresfile is not None:

        # regrid all the objects onto the full-res image, if a template is supplied
        hduf = fits.open(fullresfile)
        maskf = np.zeros_like(hduf[0].data[0, 0, :, :])
        wf = WCS(hduf[0].header)

        for slice in big_slices:
            yslice = slice[0]
            xslice = slice[1]
            ymin = yslice.start
            ymax = yslice.stop
            xmin = xslice.start
            xmax = xslice.stop
            ppos = []
            for x in [xmax, xmin]:
                for y in [ymax, ymin]:
                    ppos.append([x, y, 0, 0])
            worldlim = w.wcs_pix2world(ppos, 0)
            wpos = []
            for ra in [worldlim[:, 0].min(), worldlim[:, 0].max()]:
                for dec in [worldlim[:, 1].min(), worldlim[:, 1].max()]:
                    wpos.append([ra, dec, 0, 0])
            pixlim = wf.wcs_world2pix(wpos, 0)
            xminf = int(pixlim[:, 0].min())
            xmaxf = int(pixlim[:, 0].max())
            yminf = int(pixlim[:, 1].min())
            ymaxf = int(pixlim[:, 1].max())
            xs = np.arange(xminf, xmaxf)
            ys = np.arange(yminf, ymaxf)
            x, y = np.meshgrid(xs, ys)
            x = x.flatten()
            y = y.flatten()
            pix = np.array([x, y, np.zeros_like(x), np.zeros_like(x)]).T
            world = wf.wcs_pix2world(pix, 0)
            opix = w.wcs_world2pix(world, 0)
            for xv, yv, op in zip(x, y, opix):
                try:
                    if mask[int(op[1]), int(op[0])] > 0:
                        maskf[yv, xv] = 1
                except IndexError:
                    # catch wcs mismatches or similar
                    pass

        hduf[0].data = maskf.astype(np.float32)
        hduf.writeto(prefix + 'mask-high.fits', clobber=True)
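
A hypothetical invocation of make_extended_mask() above, assuming the low-resolution and full-resolution images named in the earlier examples exist in the working directory:

make_extended_mask('image_full_low_m.int.restored.fits',
                   'image_full_ampphase_di_m.NS.int.restored.fits',
                   rmsthresh=3.0, sizethresh=2500, rootname='ext')
# with rootname='ext' this writes ext-mask-low.fits and ext-mask-high.fits
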
Code Example #12
def make_extended_mask(infile,fullresfile,rmsthresh=3.0,sizethresh=2500,maxsize=25000,rootname=None,verbose=False,rmsfacet=False,ds9region='image_dirin_SSD_m_c.tessel.reg'):
    ''' infile is the input low-res image, fullresfile is the full-resolution template image, sizethresh the minimum island size in pixels '''

    if rootname is None:
        prefix=''
    else:
        prefix=rootname+'-'

    hdu=fits.open(infile)
    if rmsfacet == False:
        rms=get_rms(hdu)
    if rmsfacet == True:
        get_rms_map2(infile,ds9region,prefix+'rms-low.fits')
        hdu2=fits.open(prefix+'rms-low.fits')
        rms=hdu2[0].data[0,0,:]

    det=hdu[0].data[0,0,:]>rmsthresh*rms
    labels, count = nd.label(det)

    print 'found',count,'islands'
    #label, counts = np.unique(labels, return_counts=True)
    label=np.unique(labels)
    counts=np.bincount(labels.flatten())

    big=(counts>sizethresh) & (counts<maxsize)
    big_regions=label[big]

    print 'Found',len(big_regions)-1,'large islands'
    if verbose: print counts[big]

    mask=np.zeros_like(det,dtype=int)
    for l in big_regions:
        if l: mask+=l*(labels==l)

    slices=nd.find_objects(mask)
    big_slices=[slices[i-1] for i in big_regions if i]
    kernel = np.ones((3,3))
    mask = convolve2d(mask, kernel, mode='same', fillvalue=0)
    mask = (mask>1)
    w=WCS(hdu[0].header)
    hdu[0].data=mask.astype(np.float32)
    hdu.writeto(prefix+'mask-low.fits',clobber=True)

    if fullresfile is not None:

        # regrid all the objects onto the full-res image, if a template is supplied
        hduf=fits.open(fullresfile)
        maskf=np.zeros_like(hduf[0].data[0,0,:,:])
        wf=WCS(hduf[0].header)


        for slice in big_slices:
            yslice=slice[0]
            xslice=slice[1]
            ymin=yslice.start
            ymax=yslice.stop
            xmin=xslice.start
            xmax=xslice.stop
            ppos=[]
            for x in [xmax,xmin]:
                for y in [ymax,ymin]:
                    ppos.append([x,y,0,0])
            worldlim=w.wcs_pix2world(ppos,0)
            wpos=[]
            for ra in [worldlim[:,0].min(),worldlim[:,0].max()]:
                for dec in [worldlim[:,1].min(),worldlim[:,1].max()]:
                    wpos.append([ra,dec,0,0])
            pixlim=wf.wcs_world2pix(wpos,0)
            xminf=int(pixlim[:,0].min())
            xmaxf=int(pixlim[:,0].max())
            yminf=int(pixlim[:,1].min())
            ymaxf=int(pixlim[:,1].max())
            xs=np.arange(xminf,xmaxf)
            ys=np.arange(yminf,ymaxf)
            x,y=np.meshgrid(xs,ys)
            x=x.flatten()
            y=y.flatten()
            pix=np.array([x,y,np.zeros_like(x),np.zeros_like(x)]).T
            world=wf.wcs_pix2world(pix,0)
            opix=w.wcs_world2pix(world,0)
            for xv,yv,op in zip(x,y,opix):
                try:
                    if mask[int(op[1]),int(op[0])]>0:
                        maskf[yv,xv]=1
                except IndexError:
                    # catch wcs mismatches or similar
                    pass

        hduf[0].data=maskf.astype(np.float32)
        hduf.writeto(prefix+'mask-high.fits',clobber=True)