def dwarf_filter(ra, dec, fwhm_small=2.0, fwhm_big=20):
    # Based on Koposov et al. (2008).
    # Code by Ken Mighell and Mike Fitzpatrick.
    # Minor edits by Robert Nikutta.
    x, y = ra, dec

    # Information about declination (y) [degrees]
    ymean = (y.min() + y.max()) / 2.0
    ydiff_arcmin = (y.max() - y.min()) * 60.0  # convert from degrees to arcmin

    # Information about right ascension (x) [degrees in time]:
    xdiff = x.max() - x.min()  # angular separation [degrees (time)]
    xmean = (x.min() + x.max()) / 2.0

    # convert from degrees in time to separation in angular degrees:
    xdiff_angular = (x.max() - x.min()) * np.cos(ymean * (np.pi / 180.0))

    # convert from degrees to arcmin
    xdiff_angular_arcmin = xdiff_angular * 60.0

    # Get the number of one-arcmin pixels in the X and Y directions:
    nx = np.rint(xdiff_angular_arcmin).astype('int')
    ny = np.rint(ydiff_arcmin).astype('int')

    # Create a two-dimensional histogram of the raw counts:
    Counts, xedges, yedges = np.histogram2d(x, y, (nx, ny))
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    raw_hist = np.rot90(Counts).copy()  # hack around Pythonic weirdness

    # Make the small and big Gaussian kernels, converting the given FWHMs
    # (in arcmin, i.e. one-arcmin pixels here) to standard deviations
    # (FWHM ~= 2.35 sigma).
    kernel_small = convolution.Gaussian2DKernel(fwhm_small / 2.35, factor=1)
    kernel_big = convolution.Gaussian2DKernel(fwhm_big / 2.35, factor=1)

    # Compute the differential convolution kernels.
    conv_big = convolution.convolve(raw_hist, kernel_big)
    conv_small = convolution.convolve(raw_hist, kernel_small)
    conv_delta = conv_small - conv_big
    delta = conv_delta.copy()

    # Compute statistics and the floor
    mean = np.mean(delta, dtype='float64')
    sigma = np.std(delta, dtype='float64')
    sigmaRaw = np.std(raw_hist, dtype='float64')
    median = np.median(delta)  # not used
    floor = mean

    # Clip to specified limits.
    clipped = delta.copy()
    clipped[delta < floor] = floor

    # Return the computed fields.
    return raw_hist, extent, delta, clipped, sigma
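# --- Usage sketch for dwarf_filter (not from the original source) ---
# Assumes `np` is numpy and `convolution` is astropy.convolution, as the
# function body requires; the field geometry and star counts are made up.
import numpy as np
from astropy import convolution

rng = np.random.default_rng(0)
# ~1x1 degree field of background stars plus a compact overdensity
ra = np.concatenate([rng.uniform(150.0, 151.0, 5000),
                     rng.normal(150.5, 0.02, 200)])
dec = np.concatenate([rng.uniform(20.0, 21.0, 5000),
                      rng.normal(20.5, 0.02, 200)])
raw_hist, extent, delta, clipped, sigma = dwarf_filter(ra, dec)
# candidate dwarf-galaxy overdensities stand out above a few sigma
candidates = clipped > 5 * sigma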
def __init__(
    self,
    grid_size,
    beam_maj_au=None,
    beam_min_au=None,
    vreso_kms=None,
    beam_pa_deg=0,
    mode="fft",
):
    # relation: standard deviation = FWHM / (2 sqrt(2 ln 2))
    # beam_pa_deg: counterclockwise is positive
    self.mode = mode
    fwhm_over_sigma = 2 * np.sqrt(2 * np.log(2))
    conv_size = [beam_maj_au + 1e-100, beam_min_au + 1e-100]
    if vreso_kms is not None:
        conv_size += [vreso_kms + 1e-100]
    stddev = np.array(conv_size) / np.array(grid_size) / fwhm_over_sigma
    beampa = np.radians(beam_pa_deg)
    self.Kernel_xy2d = aconv.Gaussian2DKernel(
        x_stddev=stddev[0], y_stddev=stddev[1], theta=beampa
    )._array
    if len(conv_size) == 3 and (conv_size[2] is not None):
        Kernel_v1d = aconv.Gaussian1DKernel(stddev[2])._array
        self.Kernel_3d = np.multiply(
            self.Kernel_xy2d[:, :, np.newaxis],
            Kernel_v1d[np.newaxis, np.newaxis, :],
        )
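# --- Sanity check for the FWHM <-> sigma relation above (a sketch) ---
# A Gaussian falls to half its peak at x = sigma*sqrt(2 ln 2), so
# FWHM = 2*sqrt(2 ln 2)*sigma ~= 2.3548*sigma.
import numpy as np

fwhm_over_sigma = 2 * np.sqrt(2 * np.log(2))
x_half = fwhm_over_sigma / 2  # half the FWHM, for sigma = 1
assert np.isclose(np.exp(-x_half**2 / 2), 0.5)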
def __call__( self, kernel=conv.Gaussian2DKernel(x_stddev=2), fill_nans=False, verbose=True, ): return self.spatial(kernel, fill_nans, verbose)
def conv_kernel_and_CCD_project_Gauss(image, image_angle_per_pixel, z=0., proj_dens=10, psf_rad=10): wavelength = wavelengths['halpha'] * (1. + z) R_airy_zero = airy_size(wavelength) pix_size_image_phys = image_angle_per_pixel / pix_size_angle * pix_size_phys seeing_limit_phys = seeing_limit / pix_size_angle * pix_size_phys kernel_radius = int(np.ceil(psf_rad * R_airy_zero / pix_size_image_phys)) if not kernel_radius % 2: #if kernel radius is even kernel_radius += 1 sigma = np.sqrt( (0.42 / 1.22 * R_airy_zero)**2 + seeing_limit_phys**2 / 2.3548**2 ) # using the smaller of two approximations for sigma corresponding to an Airy disk radius # For now: use a Gaussian PSF, with the seeing limit as FWHM psf = cnv.Gaussian2DKernel(sigma / pix_size_image_phys, x_size=6 * kernel_radius + 1, y_size=6 * kernel_radius + 1, mode='oversample') # calculate psf inside the kernel_radius psf_image = cnv.convolve(10**image, psf, boundary='wrap') #proj_grid = np.indices((image.shape[0]*proj_dens,image.shape[1]*proj_dens)) return psf_image
def gaussMask(self): import astropy.convolution as krn mask = np.zeros((self.par['xMax'], self.par['yMax'])) xx, yy = self.circMask() mask[xx, yy] = 1 gaus = krn.Gaussian2DKernel(self.gaussR) gmap = krn.convolve_fft(mask, gaus) return gmap
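# --- Self-contained sketch of the gaussMask idea (sizes hypothetical) ---
# Smooth a binary circular mask with a Gaussian kernel to obtain a
# soft-edged weight map, as the method above does for self.circMask().
import numpy as np
import astropy.convolution as krn

mask = np.zeros((128, 128))
yy, xx = np.ogrid[:128, :128]
mask[(xx - 64)**2 + (yy - 64)**2 <= 20**2] = 1.0
gmap = krn.convolve_fft(mask, krn.Gaussian2DKernel(5))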
def convolve_psf(images, seeing): # We can apply convolution to all the images simultaneously # if the seeing is the same across all images # See tests at https://gist.github.com/19e446a494fbe5b5ad4c4384c23a55a9 stdev = seeing / 2.354 psf = conv.Gaussian2DKernel(stdev) psf = psf.array[None, ..., None] images = convolve(images, psf, mode='constant') return images
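# --- Usage sketch for convolve_psf (not from the original source) ---
# Assumes `conv` is astropy.convolution and `convolve` is
# scipy.ndimage.convolve, which matches the `mode='constant'` call above;
# the (n_images, ny, nx, n_bands) stack shape is hypothetical.
import numpy as np
import astropy.convolution as conv
from scipy.ndimage import convolve

images = np.random.rand(4, 64, 64, 1)
blurred = convolve_psf(images, seeing=3.0)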
def convolve_evidence(store, kernel): """ Convolve the evidence maps and re-select the preferred number of model components. Products include: * 'conv_evidence' (m, b, l) * 'conv_nbest' (b, l) Parameters ---------- store : HdfStore kernel : number or `astropy.convolution.Kernel2D` Either a kernel instance or a number defining the standard deviation in map pixels of a Gaussian convolution kernel. """ print(':: Convolving evidence maps') if isinstance(kernel, (int, float)): kernel = convolution.Gaussian2DKernel(kernel) hdf = store.hdf dpath = store.dpath ncomp_max = hdf.attrs['n_max_components'] lnZ_thresh = hdf.attrs['lnZ_threshold'] # dimensions (m, b, l) data = hdf[f'{dpath}/evidence'][...] # dimensions (b, l) nbest = hdf[f'{dpath}/nbest'][...] cdata = np.zeros_like(data) # Spatially convolve evidence values. The convolution operator is # distributive, so C(Z1-Z0) should equal C(Z1)-C(Z0). for i in range(data.shape[0]): cdata[i, :, :] = convolution.convolve(data[i, :, :], kernel, boundary='extend') # Re-compute N-best with convolved data conv_nbest = np.full(cdata[0].shape, 0, dtype=np.int32) for i in range(ncomp_max): # each step must pass the evidence threshold, eg both 0->1 and 1->2 # where 1->2 should not be chosen if 0->1 was not. conv_nbest[(conv_nbest == i) & (cdata[i + 1] - cdata[i] > lnZ_thresh)] += 1 # refill the "NaN" values conv_nbest[nbest == -1] = -1 # Guard change in Nbest to no more than +1. In practice this should only apply # to a very small number of pixels but it will create errors because a jump # of +2 will not have had a model run for it. overshot = conv_nbest - nbest >= 2 conv_nbest[overshot] = nbest[overshot] + 1 # dimensions (b, l) store.create_dataset('conv_nbest', conv_nbest, group=dpath) # dimensions (m, b, l) store.create_dataset('conv_evidence', cdata, group=dpath)
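# --- Numerical check (not from the original) of the property noted above ---
# Convolution with a fixed kernel is linear, so conv(Z1 - Z0) equals
# conv(Z1) - conv(Z0) up to floating-point error.
import numpy as np
from astropy import convolution

z0 = np.random.rand(32, 32)
z1 = np.random.rand(32, 32)
k = convolution.Gaussian2DKernel(1.5)
lhs = convolution.convolve(z1 - z0, k, boundary='extend')
rhs = (convolution.convolve(z1, k, boundary='extend')
       - convolution.convolve(z0, k, boundary='extend'))
assert np.allclose(lhs, rhs)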
def postprocess_run(field, store_suffix, utrans): print(f':: Post-processing {field}_{store_suffix}') # Standard deviation in pixels: 1.1 -> FWHM 0.28 as (cf. HPBW / 2 = 0.25 as) evid_kernel = convolution.Gaussian2DKernel(1.1) post_kernel = get_info_kernel(6) # 3.5 pixel radius circular window stack = get_cubestack(field) runner = get_runner(stack, utrans, ncomp=1) # begin post-processing steps with get_store(field, store_suffix) as store: nf.aggregate_run_attributes(store) nf.convolve_evidence(store, evid_kernel) nf.aggregate_run_products(store) nf.aggregate_run_pdfs(store) nf.deblend_hf_intensity(store, stack, runner)
def convolve(xda, kernel=None, fill_nans=False, verbose=True):
    import warnings
    warnings.filterwarnings("ignore", ".*A contiguous region of NaN values.*")

    def _convolve_timestep(xda, kernel, preserve_nan):
        convolved = xda.copy()
        convolved.values = conv.convolve(xda.values, kernel,
                                         preserve_nan=preserve_nan,
                                         boundary="wrap")
        return convolved

    ndims = len(xda.dims)
    preserve_nan = not fill_nans

    if kernel is None:
        kernel = conv.Gaussian2DKernel(x_stddev=2)
    elif isinstance(kernel, list):
        if len(kernel) == 2:
            kernel_size = kernel
            # astropy requires odd kernel dimensions; bump even sizes by one
            for i, ks in enumerate(kernel_size):
                kernel_size[i] += 0 if (ks % 2) else 1
            kernel = conv.kernels.Box2DKernel(max(kernel_size))
            kernel._array = kernel._array[:kernel_size[0], :kernel_size[1]]
        else:
            raise ValueError(
                "If you pass a list to `kernel`, it needs to have a length of 2"
            )
    elif isinstance(kernel, conv.core.Kernel2D):
        pass
    else:
        raise ValueError(
            "kernel needs to be a list or astropy.kernels.Kernel2D base type")

    if ndims == 2:
        convolved = _convolve_timestep(xda, kernel, preserve_nan)
    elif ndims == 3:
        convolved = []
        for t in range(xda.shape[0]):
            convolved += (_convolve_timestep(xda[t], kernel, preserve_nan), )
        convolved = xr.concat(convolved, dim=xda.dims[0])

    kern_size = kernel.shape
    convolved.attrs["description"] = (
        "same as `{}` but with {}x{}deg (lon x lat) smoothing using "
        "astropy.convolution.convolve").format(xda.name, kern_size[0],
                                               kern_size[1])
    return convolved
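# --- Usage sketch for the xarray wrapper above (dims/sizes hypothetical) ---
# `xr` is xarray and `conv` astropy.convolution, as the function body assumes.
import numpy as np
import xarray as xr

da = xr.DataArray(np.random.rand(3, 90, 180),
                  dims=("time", "lat", "lon"), name="sst")
smoothed = convolve(da)              # default 2-pixel Gaussian kernel
boxed = convolve(da, kernel=[3, 5])  # 3x5 boxcar (sizes forced to odd)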
def gaussian_filter(a, x_stddev, y_stddev=None, **kwargs): """ http://docs.astropy.org/en/stable/convolution/index.html """ kwargs_def = { 'fill_value': np.nan, 'preserve_nan': True, # 'boundary': 'extend', # 'nan_treatment': 'interpolate', } a2 = ap_conv.convolve(a, kernel=ap_conv.Gaussian2DKernel(x_stddev, y_stddev), **{ **kwargs_def, **kwargs }) return a2
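# --- Usage sketch for gaussian_filter (not from the original source) ---
# NaN gaps are interpolated over during the convolution but restored
# afterwards (preserve_nan=True in the defaults above).
import numpy as np

a = np.random.rand(64, 64)
a[10:12, 20:25] = np.nan
sm = gaussian_filter(a, x_stddev=2)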
def conv_kernel_and_CCD_project_Airy(image, image_angle_per_pixel, z=0., proj_dens=10, psf_rad=15): wavelength = wavelengths['halpha'] * (1. + z) R_airy_zero = airy_size(wavelength) pix_size_image_phys = image_angle_per_pixel / pix_size_angle * pix_size_phys seeing_limit_phys = seeing_limit / pix_size_angle * pix_size_phys kernel_radius = int(np.ceil(psf_rad * R_airy_zero / pix_size_image_phys)) if not kernel_radius % 2: #if kernel radius is even kernel_radius += 1 #for_kernel = np.indices((2*kernel_radius+1,)*2) - kernel_radius #for_kernel = np.sqrt(for_kernel[0]**2 + for_kernel[1]**2)*pix_size_image_phys # centre mode should be the same approach as for the smoothing kernels #automatically normalised so sum =1.0, Airy zero radius given in pixels airy_disk_psf = cnv.AiryDisk2DKernel(R_airy_zero / pix_size_image_phys, x_size=4 * kernel_radius + 1, y_size=4 * kernel_radius + 1, mode='oversample') # For now: use a Gaussian PSF, with the seeing limit as FWHM seeing_psf = cnv.Gaussian2DKernel(seeing_limit_phys / pix_size_image_phys / 2.3548, x_size=6 * kernel_radius + 1, y_size=6 * kernel_radius + 1, mode='oversample') # watch out for effects of extending out to larger radii at the corners than edge middles; just include the weird edges for now psf_tot = cnv.convolve(seeing_psf, airy_disk_psf, boundary='fill', fill_value=0.) # calculate psf inside the kernel_radius psf_image = cnv.convolve(10**image, psf_tot, boundary='wrap') #proj_grid = np.indices((image.shape[0]*proj_dens,image.shape[1]*proj_dens)) return psf_image
def convolve(map, fwhm, pow=4.7, size=5, kernel='moff'):
    map = np.ma.filled(map, fill_value=np.nan)
    print('convolve: map shape=', np.shape(map))
    if kernel == 'moff':
        sig = fwhm / (2 * np.sqrt(2 ** (1. / pow) - 1.))
    elif kernel == 'gauss':
        sig = gf2s * fwhm
    size = int(np.round(size * sig))
    if size % 2 == 0:
        size += 1
    print('convolve: fwhm, sig, total size, phys_res=', fwhm, sig, size,
          fwhm * 26. / np.shape(map)[0])
    if kernel == 'moff':
        kernel = con.Moffat2DKernel(sig, pow, x_size=size, y_size=size)
    elif kernel == 'gauss':
        kernel = con.Gaussian2DKernel(sig, x_size=size, y_size=size)
    result = con.convolve_fft(map, kernel, normalize_kernel=True)
    median = np.median(np.ma.compressed(np.ma.masked_where(map <= 0, map)))
    # discard any resulting pixel that is more than O(10) dex below the input
    # data, to avoid round-off error pixels
    result[np.log10(np.abs(median)) - np.log10(np.abs(result)) > 10.] = 0.
    return result
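# --- Width-conversion formulas used above, standalone (a sketch) ---
# For a Moffat profile with power index `beta`,
# FWHM = 2*sig*sqrt(2**(1/beta) - 1); for a Gaussian,
# FWHM = 2*sqrt(2 ln 2)*sigma ~= 2.3548*sigma (so the global `gf2s`
# above is presumably 1/2.3548).
import numpy as np

def moffat_sig(fwhm, beta=4.7):
    return fwhm / (2 * np.sqrt(2**(1. / beta) - 1.))

def gauss_sig(fwhm):
    return fwhm / (2 * np.sqrt(2 * np.log(2)))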
def get_moment0_map(self, **kwargs):
    """Returns moment 0 map in Jy*km/s per pixel as numpy array.

    Parameters
    ----------
    line : str
        Line to use for map, default: 'CII'
    convolve : bool
        if True: convolve with beam of FWHM also supplied, default: False
    FWHM : float
        FWHM of beam to convolve with, default: None
    ISM_dc_phase : str
        ISM datacube phase to use for moment 0 map, default: 'tot'
        (all ISM phases)
    units : str
        units for moment0 map, default: 'Jykms' for Jy*km/s
        (other options: 'Wm2' for W/m^2)
    """
    # unpack keyword arguments explicitly (replaces a fragile exec() hack
    # that wrote the kwargs into globals)
    line = kwargs.get('line', 'CII')
    convolve = kwargs.get('convolve', False)
    FWHM = kwargs.get('FWHM', None)
    ISM_dc_phase = kwargs.get('ISM_dc_phase', 'tot')
    units = kwargs.get('units', 'Jykms')

    if ISM_dc_phase == 'tot':
        dc_summed = self.__get_dc_summed(target=line)
    else:
        dc_summed = self.__get_dc_phase(target=line, ISM_dc_phase=ISM_dc_phase)
    mom0 = dc_summed.sum(axis=0) * v_res  # Jy*km/s per pixel
    if convolve:
        self.FWHM_arcsec = aux.get_Herschel_FWHM(line)
        self.FWHM_kpc = np.arctan(self.FWHM_arcsec / 60. / 60. / 360. * 2. *
                                  np.pi) * self.gal_ob.ang_dist_kpc
        print('Convolving moment 0 map with beam of FWHM of size %.4s arcsec '
              '(%.4s kpc)' % (self.FWHM_arcsec, self.FWHM_kpc))
        kernel = convol.Gaussian2DKernel(aux.FWHM_to_stddev(self.FWHM_kpc))
        mom0 = convol.convolve(mom0, kernel)
    mom0 = mom0.T  # in order to compare with particle data when plotting
    return mom0
def test_convolution(): cube, data = cube_and_raw('255_delta.fits') # 1" convolved with 1.5" -> 1.8027.... target_beam = Beam(1.802775637731995*u.arcsec, 1.802775637731995*u.arcsec, 0*u.deg) conv_cube = cube.convolve_to(target_beam) expected = convolution.Gaussian2DKernel((1.5*u.arcsec / beam.SIGMA_TO_FWHM / (5.555555555555e-4*u.deg)).decompose().value, x_size=5, y_size=5, ) np.testing.assert_almost_equal(expected.array, conv_cube.filled_data[0,:,:].value) # 2nd layer is all zeros assert np.all(conv_cube.filled_data[1,:,:] == 0.0)
def postprocess_run(target, store_prefix): print(f':: Post-processing {target}') # Standard deviation in pixels: 0.85 -> FWHM 17.6 (cf. HPBW / 2 = 16 as) evid_kernel = convolution.Gaussian2DKernel(0.85) post_kernel = get_info_kernel(3) # 3.5 pixel radius circular window utrans = get_keystone_priors(target) par_bins = get_bins(target) store_name = f'run/{store_prefix}_{target}' store = nf.HdfStore(store_name) stack = get_cubestack(target) runner = get_runner(stack, utrans, ncomp=1) # begin post-processing steps nf.aggregate_run_attributes(store) nf.convolve_evidence(store, evid_kernel) nf.aggregate_run_products(store) nf.aggregate_run_pdfs(store, par_bins=par_bins) nf.convolve_post_pdfs(store, post_kernel, evid_weight=False) nf.quantize_conv_marginals(store) nf.deblend_hf_intensity(store, stack, runner) store.close()
def test_convolution_2D(): proj, hdu = load_projection("55_delta.fits") # 1" convolved with 1.5" -> 1.8027.... target_beam = Beam(1.802775637731995 * u.arcsec, 1.802775637731995 * u.arcsec, 0 * u.deg) conv_proj = proj.convolve_to(target_beam) expected = convolution.Gaussian2DKernel( (1.5 * u.arcsec / beam.SIGMA_TO_FWHM / (5.555555555555e-4 * u.deg)).decompose().value, x_size=5, y_size=5, ) expected.normalize() np.testing.assert_almost_equal(expected.array, conv_proj.value) assert conv_proj.beam == target_beam
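# --- Note (not from the original tests): where 1.8027" comes from ---
# Gaussian beams combine by adding widths in quadrature, so convolving a
# 1" beam with a 1.5" kernel gives sqrt(1**2 + 1.5**2) arcsec; equivalently,
# the kernel needed to reach a target beam has
# FWHM = sqrt(target**2 - original**2).
import numpy as np
assert np.isclose(np.sqrt(1.0**2 + 1.5**2), 1.802775637731995)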
def spatial(self, kernel=None, fill_nans=False, verbose=True):
    xda = self._obj
    ndims = len(xda.dims)
    preserve_nan = not fill_nans

    if kernel is None:
        kernel = conv.Gaussian2DKernel(x_stddev=2)
    elif isinstance(kernel, list):
        if len(kernel) == 2:
            kernel_size = kernel
            # astropy requires odd kernel dimensions; bump even sizes by one
            for i, ks in enumerate(kernel_size):
                kernel_size[i] += 0 if (ks % 2) else 1
            kernel = conv.kernels.Box2DKernel(max(kernel_size))
            kernel._array = kernel._array[: kernel_size[0], : kernel_size[1]]
        else:
            raise ValueError(
                "If you pass a list to `kernel`, it must have a length of 2"
            )
    elif isinstance(kernel, conv.core.Kernel2D):
        pass
    else:
        raise ValueError(
            "kernel needs to be a list or astropy.kernels.Kernel2D base type"
        )

    if ndims == 2:
        convolved = self._convlve_timestep(xda, kernel, preserve_nan)
    elif ndims == 3:
        convolved = []
        for t in range(xda.shape[0]):
            if verbose:
                print(".", end="")
            convolved += (self._convlve_timestep(xda[t], kernel, preserve_nan),)
        convolved = xr.concat(convolved, dim=xda.dims[0])

    kern_size = kernel.shape
    convolved.attrs["description"] = (
        "same as `{}` but with {}x{}deg (lon x lat) smoothing using "
        "astropy.convolution.convolve"
    ).format(xda.name, kern_size[0], kern_size[1])

    return convolved
def extended_masked_evidence(store, kernel, conv=True, lnz_thresh=3):
    """
    Mask the local or convolved evidence maps on a threshold and convolve a
    second time to identify weak, spatially extended features.

    Products include:

        * 'mext_evidence' (b, l)

    Parameters
    ----------
    store : HdfStore
    kernel : number or `astropy.convolution.Kernel2D`
        Either a kernel instance or a number defining the standard deviation
        in map pixels of a Gaussian convolution kernel.
    conv : bool
        Use the convolved (`conv_evidence`) versus the local evidence
        (`evidence`)
    lnz_thresh : number
        Threshold to mask the initial evidence map on.
    """
    print(':: Convolving masked evidence')
    if isinstance(kernel, (int, float)):
        kernel = convolution.Gaussian2DKernel(kernel)
    hdf = store.hdf
    dpath = store.dpath
    # dimensions (m, b, l)
    data = hdf[f'{dpath}/evidence'][...]
    # dimensions (m, b, l)
    ev_name = 'conv_evidence' if conv else 'evidence'
    mdata = hdf[f'{dpath}/{ev_name}'][...]
    mdata = mdata[1] - mdata[0]
    mask = mdata > lnz_thresh
    # Spatially convolve the masked evidence values with the new kernel.
    cdata = nans(data.shape)
    for i in range(data.shape[0]):
        data[i, mask] = np.nan
        cdata[i, :, :] = convolution.convolve(data[i, :, :], kernel,
                                              boundary='extend')
    mext = cdata[1] - cdata[0]
    # refill the NaN values after the convolution interpolates over them
    mext[np.isnan(mdata) | mask] = np.nan
    # dimensions (b, l)
    store.create_dataset('mext_evidence', mext, group=dpath)
def postprocess_run(target, store_prefix): evid_kernel = convolution.Gaussian2DKernel(1.5) # std-dev in pixels s2 = np.sqrt(2) / 2 k_arr = np.array([ [s2**2, s2**1, s2**2], [s2**1, s2**0, s2**1], [s2**2, s2**1, s2**2], ]) post_kernel = convolution.CustomKernel(k_arr) utrans = get_irdc_priors(vsys=VELOS[target]) par_bins = get_bins(VELOS[target]) store_name = f'data/run/{store_prefix}_{target}' store = nf.HdfStore(store_name) stack = get_cubestack(target) runner = get_runner(stack, utrans, ncomp=1) # begin post-processing steps nf.aggregate_run_attributes(store) nf.convolve_evidence(store, evid_kernel) nf.aggregate_run_products(store) nf.aggregate_run_pdfs(store, par_bins=par_bins) nf.convolve_post_pdfs(store, post_kernel, evid_weight=False) nf.quantize_conv_marginals(store) nf.deblend_hf_intensity(store, stack, runner) store.close()
beam = 24.9
img = img + 31.697

filfind = fil_finder_2D(img, hdr, beam, glob_thresh=20, distance=145.)
filfind.create_mask()  # size_thresh=400
filfind.medskel()
filfind.analyze_skeletons()
filfind.exec_rht()
filfind.find_widths(verbose=False)

r = 460. / 145.
conv = np.sqrt(r ** 2. - 1) * \
    (beam / np.sqrt(8 * np.log(2)) / (np.abs(hdr["CDELT2"]) * 3600.))

kernel = convolution.Gaussian2DKernel(conv)
good_pixels = np.isfinite(img)
nan_pix = np.ones(img.shape)
nan_pix[good_pixels == 0] = np.nan
conv_img = convolution.convolve(img, kernel, boundary='fill',
                                fill_value=np.nan)
# Avoid edge effects from smoothing
conv_img = conv_img * nan_pix

filfind2 = fil_finder_2D(conv_img, hdr, conv * beam, glob_thresh=20,
                         distance=145.)
filfind2.create_mask()
filfind2.medskel()
filfind2.analyze_skeletons()
filfind2.exec_rht()
def smooth_image(image_stream, stddev=2.0): kernel = conv.Gaussian2DKernel(stddev) for image in image_stream: yield conv.convolve(image, kernel)
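# --- Usage sketch for smooth_image (not from the original source) ---
# The generator smooths frames lazily, one at a time; the stream here is a
# made-up stack of random images.
import numpy as np
import astropy.convolution as conv

stream = (np.random.rand(32, 32) for _ in range(3))
for frame in smooth_image(stream, stddev=1.5):
    print(frame.shape)  # (32, 32)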
import uvcombine.utils
from uvcombine.utils import make_extended  # reconstructed from a garbled import
from astropy import convolution
import numpy as np
import pylab as pl
# deconvolve using skimage techniques
from skimage.restoration import unsupervised_wiener, richardson_lucy

np.random.seed(0)

im = make_extended(imsize=256., powerlaw=1.5)

singledish_kernel = convolution.Gaussian2DKernel(40/2.35, x_size=256,
                                                 y_size=256)
# use the kernel's underlying array for the FFT
singledish_kernel_fft = np.fft.fft2(singledish_kernel.array)
singledish_im = convolution.convolve_fft(im,
                                         convolution.Gaussian2DKernel(40/2.35),
                                         boundary='fill',
                                         fill_value=im.mean())

# deconvolve in fourier space
# This "works", but requires that you set a limit on where you perform the
# fourier-space division that is pretty arbitrary / empirical, otherwise you
# just get junk out.
deconv_kernel = singledish_kernel_fft.copy()
badpix = np.abs(deconv_kernel) < 1e-1
imfft = np.fft.fft2(im)
naive_deconvolution_fft = (imfft / deconv_kernel)
naive_deconvolution_fft[badpix] = imfft[badpix]
naive_deconvolution = np.fft.ifft2(naive_deconvolution_fft)

pl.clf()
pl.imshow(np.fft.fftshift(naive_deconvolution.real), cmap='viridis')
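# --- Hedged sketch (not in the original): Richardson-Lucy deconvolution ---
# The skimage import above suggests the intended comparison. RL assumes a
# non-negative image, so shift the single-dish map first; the iteration
# count is arbitrary and passed positionally (the keyword name changed
# across skimage versions).
psf_arr = convolution.Gaussian2DKernel(40 / 2.35).array
im_pos = singledish_im - singledish_im.min() + 1e-3
deconv_rl = richardson_lucy(im_pos, psf_arr, 30, clip=False)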
bbox_inches='tight') im.set_visible(True) for line in lines: line.set_visible(False) leg.remove() xx,vv,pvd = thindiskcurve(mass=15*u.M_sun, rmin=25*u.au, rmax=65*u.au, pvd=True, ) xx_as = (xx/d_orion).to(u.arcsec, u.dimensionless_angles()) conv_beam = convolution.Gaussian2DKernel(0.04*u.arcsec/np.diff(xx_as).mean(), 1.5*u.km/u.s/np.diff(vv).mean(), ) sm_pvd = convolution.convolve_fft(pvd, conv_beam) ax1.cla() ax1.imshow(sm_pvd, cmap='gray_r', extent=[xx_as.value.min(), xx_as.value.max(), (vv+assumed_vcen).value.min(), (vv+assumed_vcen).value.max()], interpolation='none', origin='lower', vmin=0, vmax=sm_pvd[np.abs(vv)<20*u.km/u.s].max(), label=('$M={0:0.1f}$\n$R_{{in}}={1:d}$\n$R_{{out}}={2:d}$' .format(15, 25, 65)) )
def observation(instrument, redshift, filename, quantity='v_disp',
                qtytitle=r'$\sigma$', width=5000, vmin=1, vmax=1000,
                suffix='', step=150):
    ###END ROMULUSC
    ###FOR TNG
    # ds = yt.load('/nobackupp8/jzuhone/tng/halo_3.hdf5')
    # # Center of cluster
    # C = ds.arr([127713.06189105, 120893.9511367, 77686.08604591], "kpc")
    # # R200 of cluster
    # R200 = ds.quan(2035.5552580985202, "kpc")
    # def _emission_measure(field, data):
    #     return data[(field, 'rho')]**2
    # ds.add_field(("gas", "rhosq"), function=_emission_measure)
    # def _emission_measure(field, data):
    #     nenh = data["PartType0", "Density"]*data["PartType0", 'particle_mass']
    #     nenh /= mp*mp
    #     nenh.convert_to_units("cm**-3")
    #     X_H = 0.76
    #     nenh *= X_H * data["PartType0", 'ElectronAbundance']
    #     nenh *= X_H * (1. - data["PartType0", 'NeutralHydrogenAbundance'])
    #     return nenh
    # ds.add_field(("PartType0", 'emission_measure'),
    #              function=_emission_measure,
    #              particle_type=True,
    #              units="cm**-3", force_override=True)
    # def _vz_squared(field, data):
    #     return data["gas", "velocity_z"]*data["gas", "velocity_z"]
    # ds.add_field(("gas", "velocity_z_squared"), _vz_squared,
    #              units="cm**2/s**2", force_override=True)
    # # This defines a box around the cluster with width 4.0*R200
    # le = C - 2.0*R200
    # re = C + 2.0*R200
    # reg = ds.box(le, re)
    # # This projects along the line of sight
    # prj = reg.integrate(("gas", "velocity_z"),
    #                     weight=("PartType0", "emission_measure"), axis="z")
    ###END TNG

    Mpc_rad = lcdm.angular_diameter_distance(redshift)
    kpc_arcsec = Mpc_rad.to('kpc') / units.radian.in_units('arcsec')
    width_arcsec = width / kpc_arcsec  # arcsec/side
    pixel_res = fov[instrument] / pixel_side[instrument]  # arcsec/pix for instrument
    resolution = (width_arcsec / pixel_res).value
    # nx = int(resolution)
    # frb = prj.to_frb((5.0, "Mpc"), nx, center=C)
    # # Compute sigma2
    # sigma2 = (frb["gas", "velocity_z_squared"].to_value("km**2/s**2")
    #           - frb["gas", "velocity_z"].to_value("km/s")**2)
    # # Compute emission measure
    # EM = frb["PartType0", "emission_measure"].d

    # block/bin image same as instrument
    vdisp = image(h.g, resolution=resolution, qty=quantity,
                  width=str(width) + ' kpc', qtytitle=qtytitle, av_z='rhosq',
                  title='%0.2f' % redshift, cmap=cm.magma, vmin=1,
                  vmax=max(250, vmax), log=False, noplot=True)
    print("image binned")
    EM = image(h.g, resolution=resolution, qty='rhosq',
               width=str(width) + ' kpc', qtytitle=qtytitle, av_z='vol',
               title='%0.2f' % redshift, cmap=cm.magma, vmin=1,
               vmax=max(250, vmax), log=False, noplot=True)

    fwhm_pix = psf[instrument] / pixel_res
    std_pix = fwhm_pix / 2.355
    kernel = convolution.Gaussian2DKernel(std_pix)
    # emission-measure-weighted smoothing; the original multiplied by an
    # undefined `rhosq`, so the EM image above is assumed here
    conv = (convolution.convolve(np.sqrt(vdisp) * EM, kernel)
            / convolution.convolve(EM, kernel))
    print("image convolved with PSF")

    xticks = (np.linspace(-width / 2., width / 2., int(resolution))
              / kpc_arcsec.value)
    fig, ax = plt.subplots()
    a1 = ax.imshow(conv.T, cmap=cm.magma,
                   norm=matplotlib.colors.Normalize(vmin, vmax))
    # center of the map in pixels (was sigma2.shape, defined only in the
    # commented-out TNG block above)
    c = (conv.shape[0] / 2, conv.shape[0] / 2)
    r500_arcsec = 1400 / kpc_arcsec.value  # change 1400 to r500 in kpc
    rad = r500_arcsec / pixel_res
    circ = matplotlib.patches.Circle(c, rad, edgecolor='w',
                                     linestyle='dotted', linewidth=3,
                                     fill=False)
    ax.add_patch(circ)
    rad /= 0.7  # r500 --> r200
    circ = matplotlib.patches.Circle(c, rad, edgecolor='w',
                                     linestyle='dotted', linewidth=3,
                                     fill=False)
    ax.add_patch(circ)
    plt.xticks(np.arange(0, int(resolution), step),
               ['%d' % tick for tick in xticks[::step]])
    plt.yticks(np.arange(0, int(resolution), step),
               ['%d' % tick for tick in xticks[::step]])
    plt.xlabel('R (")')
    plt.ylabel('R (")')
    plt.colorbar(a1, ax=ax)
    plt.savefig(filename + '_' + instrument + suffix)
def create_map(filename, fwhm, smooth=2, planets=None, saveOutput=True,
               outputName=None):
    """
    creates signal to noise ratio map of image.

    Required Input:
    1. String containing filename of original klipped image OR object
       containing data already taken from original klipped image

    Optional Inputs:
    1. Tuple containing the following lists:
        a. List of radial coordinates of planets in data
        b. List of corresponding position angles of planets in data
           (must be same length as a)
        c. List containing radial thickness of desired mask on either side
           of the planet, followed by the desired angular thickness
        *default value: None*
    2. Boolean designating whether or not to save the completed map to disk
        *default value: True*

    file input example, without mask, saving final map to disk:
        SNRMap.create_map("med_HD142527_8Apr14short_SDI_a7m3-10KLmodes.fits",
                          saveOutput=True)

    object input example, with mask, without saving final map to disk:
        SNRMap.create_map(data, planets=planetData)
        (where >>> planetData = [12, 20, 30, 50], [40, 100, 60, 150], [10, 5])

    Written by:
        Clare Leonard

    Last Modified:
        6/28/2017
    """

    # checks data type of 'filename'
    # if 'filename' is a string, assumes it is a filepath and reads in file
    if (isinstance(filename, str)):
        inp = read_file(filename)
    # if data type is not a string, reads in python object holding data
    else:
        inp = filename

    # temporary test begins here - move to a keyword with tunable FWHM and a
    # separate smoothing function if this works
    gauss = conv.Gaussian2DKernel(smooth)
    inpsm = conv.convolve(inp, gauss, preserve_nan=True)
    inp = inpsm
    # end test

    # creates dictionary holding the standard deviation of pixel values
    # at each radius
    #stdMap = stdevMap(inp, planets, fwhm)

    # gets size of pixel value array
    try:
        zDim, yDim, xDim = np.shape(inp)
    except:
        yDim, xDim = np.shape(inp)
        zDim = 1

    global XCenter
    global YCenter
    XCenter = (xDim - 1) / 2
    YCenter = (yDim - 1) / 2

    Output = np.zeros((zDim, yDim, xDim))

    for s in range(zDim):
        try:
            indiv = inp[s, :, :]
        except:
            indiv = inp

        # creates dictionary holding the standard deviation of pixel values
        # at each radius
        stdMap = stdevMap(indiv, planets, fwhm)

        # loops through all pixels in array
        for x in range(xDim):
            for y in range(yDim):

                # converts indices to polar coordinates
                radius, angle = toPolar(x, y)

                # use for debugging if you want to see where the mask is:
                #if (isPlanet(radius, angle, planets)):
                #    indiv[x][y] = np.nan

                # if enough pixels have been found to calculate a standard
                # deviation for this pixel's radius, the pixel value is
                # divided by the standard deviation of pixels at that radius
                try:
                    # if statement prevents a divide by zero warning message
                    if (stdMap[radius] == 0):
                        indiv[x][y] = np.nan
                    else:
                        indiv[x][y] = indiv[x][y] / stdMap[radius]

                    # debugging step to show noise map:
                    #indiv[x][y] = stdMap[radius]

                # if no standard deviation has been calculated, pixel is
                # given a nan value
                except:
                    indiv[x][y] = np.nan

        Output[s, :, :] = indiv

    # saves output to disk if saveOutput designated True
    if (saveOutput == True):
        hdu = fits.PrimaryHDU(Output)
        hdulist = fits.HDUList([hdu])
        newname = str(nameOutput(filename, outputName))
        hdulist.writeto(newname, overwrite=True)
        print("Wrote %s to " % newname + os.getcwd())

    # returns final SNR map
    return Output
def convolve_post_pdfs(store, kernel, evid_weight=True): """ Spatially convolve the model posterior PDF. Products include: * 'conv_post_pdfs' (r, m, p, h, b, l) Parameters ---------- store : HdfStore kernel : number or `astropy.convolution.Kernel2D` Either a kernel instance or a number defining the standard deviation in map pixels of a Gaussian convolution kernel. evid_weight : bool, default True Use the evidence over the null model to weight the pixel data. """ print(':: Convolving posterior PDFs') if isinstance(kernel, (int, float)): kernel = convolution.Gaussian2DKernel(kernel) hdf = store.hdf dpath = store.dpath ncomp_max = hdf.attrs['n_max_components'] # dimensions (r, m, p, h, b, l) data = hdf[f'{dpath}/post_pdfs'][...] cdata = np.zeros_like(data) # Fill zeros to avoid problems with zeros in log product data[data == 0] = 1e-32 ldata = np.log(data) if evid_weight: # dimensions (m, b, l) evid = hdf[f'{dpath}/evidence'][...] # dimensions (b, l) nbest = hdf[f'{dpath}/conv_nbest'][...] # compute difference between preferred model number and zero. z_best = take_by_components(evid[1:, :, :], nbest) d_evid = z_best - evid[0, :, :] # transform to interval [0.0, 1.0] d_evid -= np.nanmin(d_evid) d_evid /= np.nanmax(d_evid) d_evid = d_evid.reshape((1, 1, 1, 1, *d_evid.shape)) # weight the PDF distributions by the delta-evidence ldata *= d_evid # Spatially convolve the (l, b) map for every (model, parameter, # histogram) set. cart_prod = itertools.product( range(data.shape[0]), # r range(data.shape[1]), # m range(data.shape[2]), # p range(data.shape[3]), # h ) for i_r, i_m, i_p, i_h in cart_prod: if i_m > i_r: continue cdata[i_r, i_m, i_p, i_h, :, :] = convolution.convolve_fft(ldata[i_r, i_m, i_p, i_h, :, :], kernel, normalize_kernel=False) # convert back to linear scaling cdata = np.exp(cdata) # ensure the PDFs are normalized cdata /= np.nansum(cdata, axis=3, keepdims=True) # re-mask the NaN positions cdata[np.isnan(data)] = np.nan store.create_dataset('conv_post_pdfs', cdata, group=dpath)
import FITS_tools
import reproject
import paths
import image_registration
from radio_beam import Beam
from astropy import convolution
import gaussfitter

epoch3 = fits.open(
    paths.dpath(
        "W51Ku_BDarray_continuum_2048_both_uniform.hires.clean.image.fits"))
beam3 = Beam.from_fits_header(epoch3[0].header)
epoch3[0].data = epoch3[0].data.squeeze()
wcs3 = wcs.WCS(epoch3[0].header).sub([wcs.WCSSUB_CELESTIAL])
epoch3header = wcs3.to_header()
epoch3header['NAXIS'] = 2
epoch3header['NAXIS1'] = epoch3[0].data.shape[1]
epoch3header['NAXIS2'] = epoch3[0].data.shape[0]

kernel = (0.3 * u.arcsec).to(u.deg)
pixscale = (wcs3.pixel_scale_matrix.diagonal()**2).sum()**0.5 * u.deg
print('pix kernel size: {0}'.format(kernel / pixscale))
smoothed3 = convolution.convolve_fft(
    epoch3[0].data,
    convolution.Gaussian2DKernel(kernel / pixscale))

diff3sm = epoch3[0].data - smoothed3

diff3smhdu = fits.PrimaryHDU(data=diff3sm, header=epoch3header)
diff3smhdu.writeto(paths.dpath("Kuband_Epoch3sm-Epoch3.fits"),
                   overwrite=True, output_verify='fix')
import FITS_tools from astropy.io import fits from masked_cubes import cube303 from astropy import convolution import numpy as np column_image = fits.open('/Users/adam/work/gc/gcmosaic_column_conv36.fits')[0] dusttem_image = fits.open('/Users/adam/work/gc/gcmosaic_temp_conv36.fits')[0] # fix NaNs by convolving col_conv = convolution.convolve_fft(column_image.data, convolution.Gaussian2DKernel(2), nan_treatment='interpolate', normalize_kernel=True) whnan = np.isnan(column_image.data) column_image.data[whnan] = col_conv[whnan] dusttem_conv = convolution.convolve_fft(dusttem_image.data, convolution.Gaussian2DKernel(2), nan_treatment='interpolate', normalize_kernel=True) whnan = np.isnan(dusttem_image.data) dusttem_image.data[whnan] = dusttem_conv[whnan] apex_header = cube303[0, :, :].hdu.header column_regridded = FITS_tools.hcongrid.hcongrid_hdu(column_image, apex_header) dusttem_regridded = FITS_tools.hcongrid.hcongrid_hdu(dusttem_image, apex_header)
def create_map(filename, fwhm, smooth=False, planets=None, saveOutput=True,
               outputName=None, checkmask=False, noisemap=False):
    """
    creates signal to noise ratio map of image.

    Required Input:
    1. String containing filename of original klipped image OR object
       containing data already taken from original klipped image

    Optional Inputs:
    1. Tuple containing the following lists:
        a. List of radial coordinates of planets in data
        b. List of corresponding position angles of planets in data
           (must be same length as a)
        c. List containing radial thickness of desired mask on either side
           of the planet, followed by the desired angular thickness
        *default value: None*
    2. Boolean designating whether or not to save the completed map to disk
        *default value: True*

    file input example, without mask, saving final map to disk:
        SNRMap.create_map("med_HD142527_8Apr14short_SDI_a7m3-10KLmodes.fits",
                          saveOutput=True)

    object input example, with mask, without saving final map to disk:
        SNRMap.create_map(data, planets=planetData)
        (where >>> planetData = [12, 20, 30, 50], [40, 100, 60, 150], [10, 5])

    Written by:
        Clare Leonard

    Last Modified:
        Feb 2019 by KBF - added checkmask and noisemap keywords, removed
            default smooth
        Mar 2019 by KBF - returning max pixel under mask, adding loop over
            3rd dimension so can generate 3D SNRmaps, return snrs and
            masked images
    """
    print('this is the REPAIRED SNRMap code')

    # checks data type of 'filename'
    # if 'filename' is a string, assumes it is a filepath and reads in file
    if (isinstance(filename, str)):
        inp = read_file(filename)
    # if data type is not a string, reads in python object holding data
    else:
        inp = filename

    # smooth input image by specified amount
    if smooth > 0:
        print("smoothing")
        gauss = conv.Gaussian2DKernel(smooth)
        inpsm = conv.convolve(inp, gauss, preserve_nan=True)
        inp = inpsm

    # gets size of pixel value array
    try:
        zdim, ydim, xdim = np.shape(inp)
    except:
        ydim, xdim = np.shape(inp)
        zdim = 1

    global XCenter
    global YCenter
    XCenter = (xdim - 1) / 2
    YCenter = (ydim - 1) / 2

    Output = np.zeros((zdim, ydim, xdim))
    if checkmask == True:
        msks = np.ones((zdim, ydim, xdim))
        msk = np.ones((ydim, xdim))
    if noisemap == True:
        noises = np.ones((zdim, ydim, xdim))
        noise = np.ones((ydim, xdim))
    snrs = np.zeros(zdim)
    planet_pixels = np.ones((ydim, xdim)) * np.nan

    for s in range(zdim):
        try:
            indiv = inp[s, :, :]
        except:
            indiv = inp

        # creates dictionary holding the standard deviation of pixel values
        # at each radius
        stdMap = stdevMap(indiv, planets, fwhm)

        # loops through all pixels in array
        for x in range(xdim):
            for y in range(ydim):

                # converts indices to polar coordinates
                radius, angle = toPolar(x, y)

                if checkmask == True:
                    if (isPlanet(radius, angle, planets)):
                        msk[x][y] = 0

                # if enough pixels have been found to calculate a standard
                # deviation for this pixel's radius, the pixel value is
                # divided by the standard deviation of pixels at that radius
                try:
                    # if statement prevents a divide by zero warning message
                    if (stdMap[radius] == 0):
                        indiv[x][y] = np.nan
                    else:
                        indiv[x][y] = indiv[x][y] / stdMap[radius]
                        if noisemap == True:
                            noise[x][y] = stdMap[radius] * 5

                # if no standard deviation has been calculated, pixel is
                # given a nan value
                except:
                    indiv[x][y] = np.nan

                # captures an array with just the planet pixels
                if (isPlanet(radius, angle, planets)):
                    planet_pixels[x][y] = indiv[x][y]

        Output[s, :, :] = indiv
        if checkmask == True:
            msks[s, :, :] = msk
        if noisemap == True:
            noises[s, :, :] = noise
        snrs[s] = np.nanmax(planet_pixels)
        print("max SNR under mask is", snrs[s])

    # compute the masked images outside the save block so they are defined
    # whenever checkmask is True (previously raised a NameError when
    # saveOutput was False)
    if checkmask == True:
        maskedims = msks * indiv

    # saves output to disk if saveOutput designated True
    if (saveOutput == True):
        hdu = fits.PrimaryHDU(Output)
        hdulist = fits.HDUList([hdu])
        newname = str(nameOutput(filename, outputName))
        hdulist.writeto(newname, overwrite=True)
        print("Wrote %s to " % newname + os.getcwd())
        if checkmask == True:
            fits.writeto(newname[:-5] + '_masked.fits', maskedims,
                         overwrite=True)
        if noisemap == True:
            fits.writeto('noisemap.fits', noises, overwrite=True)

    # returns final SNR map
    if checkmask == True:
        return Output, snrs, maskedims
    else:
        return Output
def generate_kernel2(line, boxlength, numpix, kernel, z, psf_rad=None, tag=''):
    if psf_rad is None:
        if kernel == 'gauss':
            psf_rad = 10
        if kernel == 'airy':
            psf_rad = 300

    # angle_per_pix
    comdist = comoving_distance_cm(z)
    longlen = float(boxlength) * cons.cm_per_mpc
    if comdist > longlen:
        # even at larger values, the projection along z-axis = projection
        # along sightline approximation will break down
        adist = comdist / (1. + z)
    else:
        adist = longlen / (1. + z)
    image_angle_per_pixel = longlen / numpix * 1. / adist * radians

    wavelength = wavelengths[line] * (1. + z)
    R_airy_zero = airy_size(wavelength)
    pix_size_image_phys = image_angle_per_pixel / pix_size_angle * pix_size_phys
    seeing_limit_phys = seeing_limit / pix_size_angle * pix_size_phys

    kernel_radius = int(
        np.ceil(psf_rad * max(R_airy_zero, seeing_limit_phys) /
                pix_size_image_phys))
    if not kernel_radius % 2:  # if kernel radius is even
        kernel_radius += 1

    if kernel == 'gauss':
        # using the smaller of two approximations for sigma corresponding to
        # an Airy disk radius
        sigma = np.sqrt((0.42 / 1.22 * R_airy_zero)**2 +
                        seeing_limit_phys**2 / 2.3548**2)
        # For now: use a Gaussian PSF, with the seeing limit as FWHM
        psf = cnv.Gaussian2DKernel(sigma / pix_size_image_phys,
                                   x_size=6 * kernel_radius + 1,
                                   y_size=6 * kernel_radius + 1,
                                   mode='oversample')
        psf.normalize(mode='integral')
    if kernel == 'airy':
        airy_disk_psf = cnv.AiryDisk2DKernel(R_airy_zero / pix_size_image_phys,
                                             x_size=4 * kernel_radius + 1,
                                             y_size=4 * kernel_radius + 1,
                                             mode='oversample')
        # For now: use a Gaussian PSF, with the seeing limit as FWHM
        seeing_psf = cnv.Gaussian2DKernel(
            seeing_limit_phys / pix_size_image_phys / 2.3548,
            x_size=6 * kernel_radius + 1,
            y_size=6 * kernel_radius + 1,
            mode='oversample')
        # watch out for effects of extending out to larger radii at the
        # corners than edge middles; just include the weird edges for now
        psf = cnv.convolve_fft(seeing_psf.array, airy_disk_psf,
                               boundary='fill', fill_value=0.)
        psf = psf / np.sum(psf)

    return {
        '(%s,%s,%s,%s,%s,%s)' % (str(boxlength), str(numpix), str(z), line,
                                 str(psf_rad), kernel) + tag: psf
    }
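# --- Standalone sketch (not from the original): composite Airy+seeing PSF ---
# The 'airy' branch above convolves a diffraction-limited Airy core with a
# Gaussian seeing disk; the pixel widths and sizes here are arbitrary.
import numpy as np
import astropy.convolution as cnv

airy = cnv.AiryDisk2DKernel(4.0, x_size=65, y_size=65, mode='oversample')
seeing = cnv.Gaussian2DKernel(3.0, x_size=65, y_size=65, mode='oversample')
composite = cnv.convolve_fft(seeing.array, airy, boundary='fill',
                             fill_value=0.)
composite /= composite.sum()  # normalize to unit sum, as in the function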