def conv(image, beam=0.1, r_ou=200, dist=140, gg=''):
    """ convolve an image with given beam """
    from numpy import sqrt
    from numpy import log
    from astropy.convolution import convolve_fft, Gaussian2DKernel

    N = image.shape[0]
    # convert FWHM -> stddev
    beam_x = beam
    beam_y = beam
    beam_stddev_x = beam_x / (2. * sqrt(2 * log(2)))
    beam_stddev_y = beam_y / (2. * sqrt(2 * log(2)))

    arcs = r_ou / dist
    pxwidth = 2.0 * arcs / N
    g_width = beam_stddev_x / pxwidth
    gauss = Gaussian2DKernel(g_width)  # stddev width in px

    # if gg is a given kernel, use it instead of the Gaussian
    if gg != '':
        z1 = convolve_fft(image, gg, normalize_kernel=True)
    else:
        z1 = convolve_fft(image, gauss, normalize_kernel=True)

    # rescale flux: correct from Jy/px to mJy/beam
    z1 *= (beam_x * beam_y / 4.) / pxwidth**2 * 1000
    return z1

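# Usage sketch for conv() above, with purely illustrative values: a 200x200
# model image in Jy/px holding a single 1 Jy point source, convolved with a
# 0.1" FWHM beam for a 200 au outer radius at 140 pc. Not from the original
# source; just a minimal call example.
import numpy as np

model = np.zeros((200, 200))
model[100, 100] = 1.0  # point source, 1 Jy/px
mjy_per_beam = conv(model, beam=0.1, r_ou=200, dist=140)
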
def do_convolutions(self):
    for i, lag in enumerate(self.lags):
        core = core_kernel(lag, self.img.shape[0], self.img.shape[1])
        annulus = annulus_kernel(lag, self.diam_ratio,
                                 self.img.shape[0], self.img.shape[1])

        # Extend to avoid boundary effects from non-periodicity
        pad_weights = np.pad(self.weights, int(lag), padwithzeros)
        pad_img = np.pad(self.img, int(lag), padwithzeros) * pad_weights

        interpolate_nan = False
        if self.nanflag:
            interpolate_nan = True

        img_core = convolve_fft(
            pad_img, core, normalize_kernel=True,
            interpolate_nan=interpolate_nan, ignore_edge_zeros=True)
        img_annulus = convolve_fft(
            pad_img, annulus, normalize_kernel=True,
            interpolate_nan=interpolate_nan, ignore_edge_zeros=True)
        weights_core = convolve_fft(
            pad_weights, core, normalize_kernel=True,
            interpolate_nan=interpolate_nan, ignore_edge_zeros=True)
        weights_annulus = convolve_fft(
            pad_weights, annulus, normalize_kernel=True,
            interpolate_nan=interpolate_nan, ignore_edge_zeros=True)

        weights_core[np.where(weights_core == 0)] = np.NaN
        weights_annulus[np.where(weights_annulus == 0)] = np.NaN

        self.convolved_arrays.append((img_core / weights_core) -
                                     (img_annulus / weights_annulus))
        self.convolved_weights.append(weights_core * weights_annulus)

    return self

def wrong_fft():
    import numpy as np
    from astropy.convolution import convolve_fft, convolve

    synthim = np.random.randn(100, 100)
    yy, xx = np.indices(synthim.shape)
    rr = ((xx - 99 / 2.)**2 + (yy - 99 / 2.)**2)**0.5
    sf = [np.sum(np.abs(synthim - convolve_fft(synthim,
                                               (rr > ii) & (rr < ii + 1))))
          for ii in np.arange(0, rr.shape[0] / 2., dtype='float')]
    # wrapped version:
    sf = [np.sum(np.abs(synthim - convolve_fft(synthim,
                                               (rr > ii) & (rr < ii + 1),
                                               boundary='wrap')))
          for ii in np.arange(0, rr.shape[0] / 2., dtype='float')]

def filter_convolve(data, filters, filter_rot=False):
    if filter_rot:
        return np.sum([convolve_fft(coef, f, boundary='wrap', crop=True)
                       for coef, f in zip(data, rotate_stack(filters))],
                      axis=0)
    else:
        return np.array([convolve_fft(data, f, boundary='wrap', crop=True)
                         for f in filters])

def make_a_fucking_image(grid, kernelsize=50.):
    size = grid.shape[0]
    center = size / 2
    kernel = Gaussian2DKernel(size / kernelsize, x_size=size, y_size=size)

    is_point = 1 * (grid > 0)
    weights = convolve_fft(is_point, kernel)
    invalid = np.where(weights < 0.00001)
    weights[invalid] = 1

    tha_result = convolve_fft(grid, kernel)
    tha_result[invalid] = 0

    return tha_result / weights

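# Usage sketch for the function above (hypothetical inputs, not from the
# original source; assumes numpy and astropy.convolution's Gaussian2DKernel /
# convolve_fft are imported at module level, as the function body requires).
# Note the grid side length must be odd, since it is reused as the kernel
# size and astropy kernel sizes must be odd.
grid = np.zeros((101, 101))
grid[40, 60] = 5.0
grid[70, 20] = 2.0
smoothed_image = make_a_fucking_image(grid, kernelsize=50.)
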
def locate(i):
    """
    Median subtract each hologram, convolve with Mexican hat kernel, then
    smooth the absolute value of the convolution, and use Otsu's thresholding
    to segment the image into specimens. Record the time, x and y centroids,
    and some intensity features within each segment.
    """
    median_sub_holo = hologram_cube[i, ...] - median_holo
    conv_holo = convolve_fft(median_sub_holo,
                             MexicanHat2DKernel(convolution_kernel_radius),
                             fftn=fft2, ifftn=ifft2)
    smooth_abs_conv = gaussian_filter(np.abs(conv_holo),
                                      gaussian_kernel_radius)

    thresh = threshold_otsu(smooth_abs_conv - np.median(smooth_abs_conv))
    # thresh = threshold_yen(smooth_abs_conv - np.median(smooth_abs_conv))

    masked = np.ones_like(smooth_abs_conv)
    masked[smooth_abs_conv <= thresh] = 0

    label_image = label(masked)
    regions = regionprops(label_image, smooth_abs_conv)
    for region in regions:
        centroid = region.weighted_centroid
        pos = (i, centroid[0], centroid[1],
               region.max_intensity, region.mean_intensity)
        positions.append(pos)

def gsmooth_cube(cube, kernelsize, use_fft=True, psf_pad=False, fft_pad=False,
                 kernelsize_mult=8, **kwargs):
    """
    Smooth a cube with a gaussian in 3d

    Because even a tiny cube can become enormous if you have, say, a
    1024x32x32 cube, padding is off by default
    """
    if cube.ndim != 3:
        raise ValueError("Wrong number of dimensions for a data cube")

    #z,y,x = np.indices(cube.shape)
    # use an odd kernel size for non-fft, even kernel size for fft
    ks = (np.array(kernelsize) * kernelsize_mult).astype('int')
    if np.any(ks % 2 == 0) and not use_fft:
        ks[ks % 2 == 0] += 1

    z, y, x = np.indices(ks)
    kernel = np.exp(-((x - x.max() / 2.)**2 / (2. * (kernelsize[2])**2) +
                      (y - y.max() / 2.)**2 / (2. * (kernelsize[1])**2) +
                      (z - z.max() / 2.)**2 / (2. * (kernelsize[0])**2)))

    if use_fft:
        return convolve_fft(cube, kernel, normalize_kernel=True,
                            psf_pad=psf_pad, fft_pad=fft_pad, **kwargs)
    else:
        return convolve(cube, kernel, normalize_kernel=True, **kwargs)

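# Usage sketch for gsmooth_cube() with illustrative values (not from the
# original source); assumes numpy and astropy.convolution's convolve /
# convolve_fft are imported at module level. kernelsize is the (z, y, x)
# Gaussian width in pixels.
import numpy as np

cube = np.random.randn(64, 32, 32)
smooth = gsmooth_cube(cube, kernelsize=(1.0, 2.0, 2.0), use_fft=True)
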
def extract_profile(hdu, wavrest, dset, imid,
                    dw=4.0, dwbg_in=6.0, dwbg_out=8.0, isT=False, smooth=10):
    jwav = 1 if isT else 0
    w = makeWCS(hdu.header, dset, imid, jwav)
    # Make sure array axis order is (position, wavelength)
    data = hdu.data.T if isT else hdu.data
    nslit, nwav = data.shape
    dwav = w.wcs.get_cdelt()[jwav] * w.wcs.get_pc()[jwav, jwav]
    sgn = np.sign(dwav)  # Need to take slices backwards if this is negative
    print('Check: wavrest = {}, dwav = {}, nslit = {}, nwav = {}'
          .format(wavrest, dwav, nslit, nwav))

    # pixel limits for line extraction
    i1 = wav2pix(wavrest - dw / 2, w, nwav, isT)
    i2 = wav2pix(wavrest + dw / 2, w, nwav, isT)
    # pixel limits for blue bg extraction
    iblu1 = wav2pix(wavrest - dwbg_out / 2, w, nwav, isT)
    iblu2 = wav2pix(wavrest - dwbg_in / 2, w, nwav, isT)
    # pixel limits for red bg extraction
    ired1 = wav2pix(wavrest + dwbg_in / 2, w, nwav, isT)
    ired2 = wav2pix(wavrest + dwbg_out / 2, w, nwav, isT)
    print(iblu1, iblu2, i1, i2, ired1, ired2)

    # extract backgrounds on blue and red sides
    bgblu = data[:, iblu1:iblu2:sgn].mean(axis=1)
    bgred = data[:, ired1:ired2:sgn].mean(axis=1)
    # take weighted average, accounting for cases where the bg region
    # does not fit in the image
    weight_blu = data[:, iblu1:iblu2:sgn].size
    weight_red = data[:, ired1:ired2:sgn].size
    bg = (bgblu * weight_blu + bgred * weight_red) / (weight_blu + weight_red)

    data -= bg[:, None]
    profile = data[:, i1:i2:sgn].sum(axis=1)
    if smooth is not None:
        profile = convolve_fft(profile, Box1DKernel(smooth))
    return profile

def get_active_region_map_2(path, image_file, mdi_flux_filter,
                            hmi_flux_filter, kernal_std):
    if "HMI" in image_file:
        flux_magnitude_filter = hmi_flux_filter
        kernal_std = kernal_std * 4
        hdu_index = 1
    else:
        hdu_index = 0
        flux_magnitude_filter = mdi_flux_filter

    gauss = astropy.convolution.Gaussian2DKernel(kernal_std)

    hdulist = fits.open(path + image_file)
    hdu = hdulist[hdu_index]

    clean_data = preprocess_image(hdu)
    data_abs = np.abs(clean_data)

    # smooth data with Fast Fourier Transform
    smoothing = convolve_fft(data_abs, gauss)
    smoothing[smoothing < flux_magnitude_filter] = 0.
    smoothing[smoothing >= flux_magnitude_filter] = 1

    # filter original image with active region map
    return np.where(smoothing != 0.0, clean_data, smoothing), hdu

def convolution_wrapper(img, kernel, use_pyfftw=False, threads=1,
                        pyfftw_kwargs={}, **kwargs):
    '''
    Adjust parameter setting to be consistent with astropy <2 and >=2.
    Also allow the FFT to be performed with pyfftw.

    Parameters
    ----------
    img : numpy.ndarray
        Image.
    use_pyfftw : bool, optional
        Enable to use pyfftw, if it is installed.
    threads : int, optional
        Number of threads to use in FFT when using pyfftw.
    pyfftw_kwargs : dict, optional
        Passed to `~turbustat.statistics.rfft_to_fft.rfft_to_fft`.
        See `here <http://hgomersall.github.io/pyFFTW/pyfftw/builders/builders.html>`_
        for a list of accepted kwargs.
    kwargs :
        Passed to `~astropy.convolution.convolve_fft`.

    Returns
    -------
    conv_img : `~numpy.ndarray`
        Convolved image.
    '''
    if use_pyfftw:
        if PYFFTW_FLAG:
            use_fftn = fftn
            use_ifftn = ifftn
        else:
            warn("pyfftw not installed. Using numpy.fft functions.")
            use_fftn = np.fft.fftn
            use_ifftn = np.fft.ifftn
    else:
        use_fftn = np.fft.fftn
        use_ifftn = np.fft.ifftn

    if int(astro_version[0]) >= 2:
        conv_img = convolve_fft(img, kernel, normalize_kernel=True,
                                fftn=use_fftn, ifftn=use_ifftn, **kwargs)
    else:
        raise Exception("Delta-variance requires astropy version >=2.")

    # in astropy >= v2, fill_value can be a NaN. ignore_edge_zeros gives
    # the same behaviour in older versions.
    # if kwargs.get('fill_value'):
    #     kwargs.pop('fill_value')
    # conv_img = convolve_fft(img, kernel, normalize_kernel=True,
    #                         ignore_edge_zeros=True,
    #                         fftn=use_fftn,
    #                         ifftn=use_ifftn,
    #                         **kwargs)

    return conv_img

def make_smooth_arrays(self):
    '''
    Smooth data using a Gaussian kernel.
    '''
    for i, width in enumerate(self.smoothing_radii):
        kernel = Gaussian2DKernel(width, x_size=self.img.shape[0],
                                  y_size=self.img.shape[1])
        if self.nanflag:
            self.smoothed_images.append(
                convolve_fft(self.img, kernel, normalize_kernel=True,
                             interpolate_nan=True))
        else:
            self.smoothed_images.append(convolve_fft(self.img, kernel))
    return self

def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    :params psf: Input PSF
    :return: restored image
    """
    assert isinstance(model, Image), model
    assert isinstance(psf, Image), psf
    assert residual is None or isinstance(residual, Image), residual

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        from scipy.optimize import minpack
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug('restore_cube: error in fitting to psf, '
                          'using 1 pixel stddev')
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except minpack.error as err:
            log.debug('restore_cube: minpack error, using 1 pixel stddev')
            size = 1.0
        except ValueError as err:
            log.debug('restore_cube: warning in fit to psf, '
                      'using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # TODO: Remove filter when astropy fixes convolve
    import warnings
    warnings.simplefilter(action='ignore', category=FutureWarning)
    from astropy.convolution import Gaussian2DKernel, convolve_fft

    # By convention, we normalise the peak not the integral so this is the
    # volume of the Gaussian
    norm = 2.0 * numpy.pi * size**2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve_fft(
                model.data[chan, pol, :, :], gk,
                normalize_kernel=False, allow_huge=True)
    if residual is not None:
        restored.data += residual.data
    return restored

def evaluate(self, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
    sersic = self.sersic_deconvolved.evaluate(x, y, amplitude, r_eff, n,
                                              x_0, y_0, ellip, theta)
    sersic_conv = convolve_fft(sersic, self.psf, boundary='wrap',
                               normalize_kernel=True)
    return sersic_conv

def make_smooth_arrays(self):
    '''
    Smooth data using a Gaussian kernel.
    '''
    for i, width in enumerate(self.smoothing_radii):
        kernel = Gaussian2DKernel(width, x_size=self.data.shape[0],
                                  y_size=self.data.shape[1])
        if self.nanflag:
            self.smoothed_images.append(
                convolve_fft(self.data, kernel, normalize_kernel=True,
                             interpolate_nan=True))
        else:
            self.smoothed_images.append(convolve_fft(self.data, kernel))

def gaussMask(self):
    import astropy.convolution as krn

    mask = np.zeros((self.par['xMax'], self.par['yMax']))
    xx, yy = self.circMask()
    mask[xx, yy] = 1

    gaus = krn.Gaussian2DKernel(self.gaussR)
    gmap = krn.convolve_fft(mask, gaus)
    return gmap

def generate_image(xy, mag, xy_ast=None, mag_ast=None, exp=None,
                   nx=2048, ny=2048, psf=None):
    """

    :param xy:
    :param mag:
    :param xy_ast:
    :param mag_ast:
    :param exp: exposure in seconds to 'normalize' streak
    :param nx:
    :param ny:
    :param psf:
    :return:
    """
    if isinstance(xy, list):
        xy = np.array(xy)
    if isinstance(mag, list):
        mag = np.array(mag)

    image = np.zeros((ny, nx))

    # let us assume that a 6 mag star would have a flux of 10^7 counts
    flux_0 = 1e9
    # scale other stars wrt that:
    flux = flux_0 * 10**(0.4 * (6 - mag))
    # print(flux)

    # add stars to image
    for k, (i, j) in enumerate(xy):
        if i < nx and j < ny:
            image[int(j), int(i)] = flux[k]

    if exp is None:
        exp = 1.0

    # add asteroid
    if xy_ast is not None and mag_ast is not None:
        flux = flux_0 * 10**(0.4 * (6 - mag_ast))
        # print(flux)
        xy_ast = np.array(xy_ast, dtype=int)
        line_points = get_line(xy_ast[0, :], xy_ast[1, :])
        for (i, j) in line_points:
            if i < nx and j < ny:
                image[int(j), int(i)] = flux / exp

    if psf is None:
        # Convolve with a gaussian
        image = gaussian_filter(image, 7)
    else:
        # convolve with a (model) psf
        image = convolve_fft(image, psf)

    return image

def convolve_to_beam(fitsfilename, beam=radio_beam.Beam(0.04 * u.arcsec),
                     distance=5400 * u.pc):
    hdr = fits.getheader(fitsfilename)
    pix_area = (hdr['CDELT1'] * u.cm)**2
    pix_area_arcsec = (pix_area / distance**2).to(u.arcsec**2,
                                                 u.dimensionless_angles())
    kernel = beam.as_kernel(pix_area_arcsec**0.5)

    data = fits.getdata(fitsfilename)
    smoothed = convolve_fft(data, kernel)
    return fits.PrimaryHDU(data=smoothed, header=hdr)

def loadCubeConvolve(rec, std=1):
    from astropy.convolution import convolve, convolve_fft
    from astropy.convolution import Gaussian2DKernel

    g = Gaussian2DKernel(std)
    dat = rec.data[0]
    cube = np.zeros(dat.shape)
    for i in range(dat.shape[0]):
        cube[i] = convolve_fft(dat[i], g)
    return cube

def update(ii):
    try:
        if os.path.exists(f'{imname}_selfcal{ii-1}_finaliter.image.tt0'):
            cube = SpectralCube.read(f'{imname}_selfcal{ii-1}.image.tt0',
                                     format='casa_image')
            im1.set_data(cube[0].value)
            cube = SpectralCube.read(f'{imname}_selfcal{ii-1}.residual.tt0',
                                     format='casa_image')
            im2.set_data(cube[0].value)
            cube = SpectralCube.read(f'{imname}_selfcal{ii-1}.model.tt0',
                                     format='casa_image')
            # assume the beam doesn't change size
            data = convolve_fft(cube[0].value, kernel,
                                allow_huge=True) * ppbeam
            im3.set_data(data)
            title.set_text(f"Selfcal iteration {ii-1} (final clean)")
            return (im1, im2, im3), (ax1, ax2, ax3)
        elif ii == 0:
            # this read was missing in the original (im1 was set from a stale
            # cube); the filename is inferred from the residual/model pattern
            cube = SpectralCube.read(f'{imname}_preselfcal.image.tt0',
                                     format='casa_image')
            im1.set_data(cube[0].value)
            cube = SpectralCube.read(f'{imname}_preselfcal.residual.tt0',
                                     format='casa_image')
            im2.set_data(cube[0].value)
            cube = SpectralCube.read(f'{imname}_preselfcal.model.tt0',
                                     format='casa_image')
            data = convolve_fft(cube[0].value, kernel,
                                allow_huge=True) * ppbeam
            im3.set_data(data)
            title.set_text("Before Selfcal")
            return (im1, im2, im3), (ax1, ax2, ax3)
        else:
            cube = SpectralCube.read(f'{imname}_selfcal{ii}.image.tt0',
                                     format='casa_image')
            im1.set_data(cube[0].value)
            cube = SpectralCube.read(f'{imname}_selfcal{ii}.residual.tt0',
                                     format='casa_image')
            im2.set_data(cube[0].value)
            cube = SpectralCube.read(f'{imname}_selfcal{ii}.model.tt0',
                                     format='casa_image')
            # assume the beam doesn't change size
            data = convolve_fft(cube[0].value, kernel,
                                allow_huge=True) * ppbeam
            im3.set_data(data)
            title.set_text(f"Selfcal iteration {ii}")
            return (im1, im2, im3), (ax1, ax2, ax3)
    except Exception as ex:
        print(ex)

def spatial_smooth(self, kernel=None, convbeam=True, spatial_smooth=None,
                   spectral_smooth=None, niter=1):
    """
    Smooth the noise estimate in the spatial dimension. Two components:
    median smoothing and convolving with the beam.
    """
    # Manually median filter (square box)
    if kernel is not None:
        print("Median filtering")
        self.spatial_norm = ssig.medfilt2d(self.spatial_norm,
                                           kernel_size=kernel)

    data = self.cube.filled_data[:].astype('=f')
    if self.spatial_norm is None:
        self.spatial_norm = np.ones(data.shape[-2:])
        self.spectral_norm = np.ones((data.shape[0]))
    for count in range(niter):
        scale = self.scale_cube
        snr = data / scale
        self.spatial_norm = nanstd(snr, axis=0) * self.spatial_norm
        if self.beam is not None:
            if self.astropy_beam_flag:
                beam = self.beam
            else:
                beam = self.beam.as_kernel(get_pixel_scales(self.cube.wcs))
            self.spatial_norm = convolve_fft(self.spatial_norm, beam,
                                             interpolate_nan=True,
                                             normalize_kernel=True)
        if spatial_smooth is not None:
            self.spatial_norm = ssig.medfilt2d(self.spatial_norm,
                                               kernel_size=spatial_smooth)

        snr = data / self.scale_cube
        self.spectral_norm = nanstd(snr.reshape((snr.shape[0],
                                                 snr.shape[1] *
                                                 snr.shape[2])),
                                    axis=1) * self.spectral_norm
        if spectral_smooth is not None:
            self.spectral_norm = ssig.medfilt(self.spectral_norm,
                                              kernel_size=spectral_smooth)
    self.spectral_norm[np.isnan(self.spectral_norm) |
                       (self.spectral_norm == 0)] = 1.
    self.spatial_norm[np.isnan(self.spatial_norm) |
                      (self.spatial_norm == 0)] = 1.
    self.spatial_norm[~self.spatial_footprint] = np.nan

    ### THIS IS ALREADY SET IN calculate_scale
    # self.distribution_shape=(0,self.scale)
    return

def generate_blurred_map(self, kernel_size, band='H'):
    self.blrcube = self.cube.copy() * nan
    self.kernel_size = kernel_size
    self.kernel = Gaussian2DKernel(self.kernel_size)

    print('Convolving spatially...')
    for i in arange(self.zsize):
        self.blrcube[i] = convolve_fft(self.cube[i], self.kernel)
    #self.blrcube += np.random.normal(0, max(self.blrcube.ravel())/100., shape(self.blrcube))

    #KMOS reaches a point source 5-sigma sensitivity in 8 hr
    #of (J, H, K) = (22, 21.0, 20.5) AB magnitudes
    #for R ~ (3380, 3800, 3750)
    #citation: http://www2011.mpe.mpg.de/Highlights/FB2004/exp13_bender.pdf
    if band == 'H':
        sens, R = 21.0, 3800
    elif band == 'J':
        sens, R = 22.0, 3380
    elif band == 'K':
        sens, R = 20.5, 3750
    else:
        # fall back to the H-band values (the original left R unset here)
        sens, R = 21.0, 3800
        print('Bad input band, setting sensitivity to H-band value, '
              '%s AB mag' % (sens))

    print('Convolving spectrally...')
    print('Adding noise...')
    sens_si_fd = (sens * u.ABmag).to(u.Watt / (u.meter * u.meter) / (u.Hz)) \
        * astropy.constants.c / (self.lam[0] * u.meter)**2.
    print(sens_si_fd)

    #The pixel values in our cube are W/m/m^2/Sr (surface brightness).
    #To get to units of flux density per pixel,
    #check to make sure this pixel scale is in proper coordinates
    pix_scale_kpc = self.cube_hdr['CD1_1'] * u.kpc
    print(pix_scale_kpc, 'per pixel')
    pix_scale_arc = pix_scale_kpc * cosmo.arcsec_per_kpc_proper(
        1. / self.ascale - 1)
    print(pix_scale_arc, 'per pixel')
    pix_scale_str = (pix_scale_arc**2.).to(u.steradian)
    print(pix_scale_str, 'per square pixel')

    #Currently in m, want to get in terms of hz^-1. F_v = (F_lam)*lam^2/c
    #The units of this factor are 1/(str*m). Let's now consider an 'aperture'
    #equal to 1 spatial fwhm * 1 spectral fwhm. We want to add noise equal to
    #1/5th the sensitivity over this aperture.
    #In steradians, the PSF is:
    psf_str = pi * (self.kernel_size**2. * u.arcsec**2.).to(u.steradian)
    print(psf_str, 'is the seeing')
    #The spectral lsf fwhm (in pixels) is:
    lsf_pix = (3.e5 / R) / (self.vscale[1] - self.vscale[0])
    print(lsf_pix)

    sens_noise = sens_si_fd / psf_str / lsf_pix
    print(sens_noise)

    self.cube += np.random.normal(0, sens_noise.value, self.cube.shape)

def find_dim_stars(array):
    # Perform a (3x3) median filter
    medArr3 = medfilt(array, 3)
    medArr9 = medfilt(array, 9)

    # Compute array statistics
    mean, median, stddev = sigma_clipped_stats(medArr3)

    # Locate pixels with more than 3-sigma deviation from the local median
    starPix = (medArr3 - medArr9) / stddev > 2

    # Clean up the edge-effects (and kokopelli)
    starPix[0:20, :] = False
    starPix[-21:-1, :] = False
    starPix[:, 0:20] = False
    starPix[:, -21:-1] = False
    starPix[kokopelliMask] = False

    # Dilate the pixel mask
    sigma = 4.0 * gaussian_fwhm_to_sigma  # FWHM = 3.0

    # Build a kernel for detecting pixels above the threshold
    kernel = Gaussian2DKernel(sigma, x_size=9, y_size=9)
    kernel.normalize()
    starPix1 = convolve_fft(starPix.astype(float), kernel.array)
    starPix1 = (starPix1 > 0.01)

    # Clean up the edge-effects
    starPix1[0:20, :] = False
    starPix1[-21:-1, :] = False
    starPix1[:, 0:20] = False
    starPix1[:, -21:-1] = False

    # Expand a second time to be conservative
    starPix11 = convolve_fft(starPix1.astype(float), kernel.array)

    return starPix11 > 0.01

def fftconvolve_psf_basis(image, psf_basis, a_fields, x, y):
    imconvolved = np.zeros_like(image)
    for j in range(len(psf_basis)):
        a = a_fields[j](x, y) * image
        psf = psf_basis[j]
        imconvolved += convolve_fft(a, psf, interpolate_nan=True,
                                    allow_huge=True)
    return imconvolved

def _convolution(self, signal):
    from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel

    G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
    G3D = G1D * G1D.reshape((-1, 1)) * G1D.reshape((-1, 1, 1))
    try:
        logger.debug('Trying astropy.convolution.convolve_fft for convolution')
        # Faster, but will fail with large signal and kernel arrays
        return convolve_fft(signal, G3D)
    except ValueError:
        logger.debug('Using astropy.convolution.convolve for convolution')
        return convolve(signal, G3D)

def fourierTransform(imageName, std=2.5):
    hdu_list = fits.open(imageName)
    data = hdu_list[0].data

    kernel = Gaussian2DKernel(std)
    fftData = convolve_fft(data, kernel)

    # the output name was undefined in the original; derive one (assumed convention)
    fourierImageName = imageName.replace('.fits', '_fft.fits')
    hdu_list[0].data = fftData
    hdu_list.writeto(fourierImageName, overwrite=True)
    return fourierImageName

def smooth_map(map, mask, smooth_kernal, smooth_scale, nan_flag):
    map[mask == 0] = nan_flag
    if smooth_kernal == 'box':
        kernel = Box2DKernel(smooth_scale)
    if smooth_kernal == 'tophat':
        kernel = Tophat2DKernel(smooth_scale / 2)
    return convolution.convolve_fft(map, kernel, normalize_kernel=True,
                                    ignore_edge_zeros=True,
                                    interpolate_nan=True)

def convolve_2D(gal, hdr, map2d, conbeam):
    '''
    Returns 2D map (e.g. SFR), convolved to a beam width "conbeam".

    Parameters:
    -----------
    gal : str OR Galaxy
        Name of galaxy, OR Galaxy object.
    hdr : fits.header.Header
        Header for the galaxy.
    map2d : np.ndarray
        The map (e.g. SFR) that needs to be convolved.
    conbeam : float
        Convolution beam width, in pc OR arcsec. Must specify units!
        The actual width of the Gaussian is conbeam/np.sqrt(8.*np.log(2)).

    Returns:
    --------
    map2d_convolved : np.ndarray
        The same map, convolved.
    '''
    if isinstance(gal, Galaxy):
        name = gal.name.lower()
    elif isinstance(gal, str):
        name = gal.lower()
        gal = Galaxy(name.upper())
    else:
        raise ValueError("'gal' must be a str or galaxy!")

    if conbeam.unit in {u.pc, u.kpc, u.Mpc}:
        conbeam_width = conbeam.to(u.pc)  # Beam width, in pc.
        conbeam_angle = conbeam / gal.distance.to(u.pc) * u.rad  # ..., in radians.
        conbeam_angle = conbeam_angle.to(u.deg) / np.sqrt(8. * np.log(2))
        # ..., in degrees, now as an actual Gaussian stdev.
    elif conbeam.unit in {u.arcsec, u.arcmin, u.deg, u.rad}:
        conbeam_angle = conbeam.to(u.deg) / np.sqrt(8. * np.log(2))
        # Beam width, in degrees, now as an actual Gaussian stdev.
    else:
        raise ValueError("'conbeam' must have units of pc or arcsec.")

    # Convert beam width into pixels, then feed this into a
    # Gaussian-generating function.
    pixsizes_deg = wcs.utils.proj_plane_pixel_scales(wcs.WCS(hdr))[0] * u.deg
    # The size of each pixel, in deg.
    conbeam_pixwidth = conbeam_angle / pixsizes_deg  # Beam width, in pixels.
    # print("Pixel width of beam: " + str(conbeam_pixwidth) + " pixels.")

    gauss = Gaussian2DKernel(conbeam_pixwidth)
    map2d_convolved = convolve_fft(map2d, gauss, normalize_kernel=True)
    return map2d_convolved

def degrade_spectrum(x, y, R_orig, regions, R_target):
    """ Degrade a spectrum to lower resolution.

    Degrade an input spectrum of higher resolution than the training
    resolution to the resolution 'R_target' by convolution with a Gaussian
    kernel of appropriate width. Only the parts relevant for computing ATHOS'
    flux ratios are considered and merged in a new spectrum 'y_new'.
    Therefore, several ranges are defined where the assumption of a constant
    profile FWHM should hold (both in pixel and wavelength space).

    Parameters
    -----------
    x : array_like, shape (N, )
        1-D array containing the wavelength information.
    y : array_like, shape (N, )
        1-D array containing the flux information.
    R_orig : float
        Original resolution of the spectrograph.
    regions : array_like, shape (M, 2)
        Lower and upper bounds for the regions that are considered to have a
        constant FWHM.
    R_target : float
        The desired resolution to which the input spectrum is degraded.

    Returns
    -------
    x_new : array_like, shape (L, )
        Merged wavelength array consisting only of the FR-relevant parts of
        the spectrum.
    y_new : array_like, shape (L, )
        Merged flux array consisting only of the FR-relevant parts of the
        spectrum at resolution 'R_target'.
    """
    R_kernel = 1 / np.sqrt(1 / R_target**2 - 1 / R_orig**2)
    x_new, y_new = [], []
    for r in regions:
        x_mask = (x > r[0]) & (x < r[1])
        if np.any(x_mask):
            mean_dx = np.nanmedian(x[x_mask][1:] - x[x_mask][:-1])
            kernel = Gaussian1DKernel(
                np.nanmean(x[x_mask]) / R_kernel / 2.3548 / mean_dx)
            y_deg = convolve_fft(y[x_mask], kernel)
            x_new += x[x_mask].tolist()
            y_new += y_deg.tolist()
        else:
            x_temp = np.arange(r[0], r[1], 0.1)
            x_new += x_temp.tolist()
            y_new += (np.zeros(len(x_temp)) * np.nan).tolist()
    x_new, y_new = np.array(x_new), np.array(y_new)

    return x_new, y_new

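# Usage sketch for degrade_spectrum() on synthetic data; the wavelengths,
# resolutions, and region bounds are illustrative only. Assumes module-level
# `import numpy as np` and
# `from astropy.convolution import convolve_fft, Gaussian1DKernel`.
x = np.linspace(4800., 5000., 20000)                   # ~0.01 A sampling
y = 1. - 0.5 * np.exp(-0.5 * ((x - 4900.) / 0.05)**2)  # one narrow line
x_new, y_new = degrade_spectrum(x, y, R_orig=50000.,
                                regions=[(4880., 4920.)], R_target=20000.)
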
def blur_gauss(in_array, sigma, radius=30):
    '''
    Performs a gaussian blur on an array of elevations. Modified from Mike
    Toews, https://gis.stackexchange.com/questions/9431/what-raster-smoothing-generalization-tools-are-available

    in_array: The input array, should be read using the supper_array
        technique from below.
    radius: The radius (in grid cells) of the gaussian blur kernel
    '''

    # This comment block is old and left here for posterity
    # Change all NoData values to mean of valid values to fix issues with
    # massive (float32.max) NoData values completely overwhelming other array
    # data. Using mean instead of 0 gives a little bit more usable data on
    # edges.
    # Create masked array to get mean of valid data
    # masked_array = np.ma.masked_values(in_array, s_nodata)
    # array_mean = masked_array.mean()
    # # Create new array that will have NoData values replaced by array_mean
    # cleaned_array = np.copy(in_array)
    # np.putmask(cleaned_array, cleaned_array==s_nodata, array_mean)

    # convolving: output pixel is the sum of the multiplication of each value
    # covered by the kernel with the associated kernel value (the kernel is a
    # set size/shape and each position has a value, which is the
    # multiplication factor used in the convolution).

    # Create new array with s_nodata values set to np.nan (for edges of raster)
    nan_array = np.where(in_array == s_nodata, np.nan, in_array)

    # build kernel (Gaussian blur function)
    # g is a 2d gaussian distribution of size (2*size) + 1
    x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]
    # Gaussian distribution
    twosig = 2 * sigma**2
    #g = np.exp(-(x**2 / twosig + y**2 / twosig)) / (twosig * math.pi)
    #LoG
    g = (-1 / (math.pi * sigma**4)) * (1 - (x**2 + y**2) / twosig) * np.exp(
        -(x**2 / twosig + y**2 / twosig)) / (twosig)
    g = 1 - g

    # Convolve the data and Gaussian function (do the Gaussian blur)
    # Suppressing runtime warnings due to NaNs (they just get hidden by
    # NoData masks in the supper_array rebuild anyways)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        # Use the astropy function because fftconvolve does not like np.nan
        #smoothed = fftconvolve(padded_array, g, mode="valid")
        smoothed = convolve_fft(nan_array, g, nan_treatment='interpolate',
                                normalize_kernel=False)

    # Uncomment the following line for a high-pass filter
    #smoothed = nan_array - smoothed

    return smoothed

def conv_image2(image, key, kernel, z=0., psf_rad=None, kernel_tag='',
                fft=False):
    # box size
    if '12' in key:
        (L_x, L_y, L_z) = (12.5, ) * 3
    if '25' in key:
        (L_x, L_y, L_z) = (25., ) * 3
    if '_h' in key:
        L_z = L_z / 2.
    if '_q' in key:
        L_z = L_z / 4.
    boxlength = L_x

    # wavelength
    if 'halpha' in key:
        line = 'halpha'

    # numpix
    if '_8_' in key or key[-2:] == '_8':
        numpix = 8000
    if '_4_' in key or key[-2:] == '_4':
        numpix = 4000
    if '_2_' in key or key[-2:] == '_2':
        numpix = 2000

    if psf_rad is None:
        if kernel == 'gauss':
            psf_rad = 10
        if kernel == 'airy':
            psf_rad = 300

    kernel_key = '(%s,%s,%s,%s,%s,%s)' % (str(boxlength), str(numpix),
                                          str(z), line, str(psf_rad),
                                          kernel) + kernel_tag
    if kernel_key not in kernels.keys():
        kernels.update(generate_kernel(line, boxlength, numpix, kernel, z,
                                       psf_rad=psf_rad, tag=kernel_tag))
    conv_kernel = kernels[kernel_key]

    if fft:
        return np.log10(cnv.convolve_fft(10**image, conv_kernel,
                                         boundary='wrap'))
    else:
        return np.log10(cnv.convolve(10**image, conv_kernel,
                                     boundary='wrap'))

def psfimg(self):
    """
    A method for applying the relevant PSF to an image made for a specific
    NIRCam filter.

    :return: The PSF'd image array.
    """
    # Convolve the PSF with the image
    convolved_img = convolve_fft(self.img, self.PSF.PSF)

    return convolved_img

def map_of_means(param, lat, lon, step=1, lat_bounds=[-90, 90],
                 lon_bounds=[-180, 180], kernel_w=0, **kwargs):
    ### TODO dlat//step should be int'd, not dlat
    dlat = lat_bounds[1] - lat_bounds[0]
    dlon = lon_bounds[1] - lon_bounds[0]
    if step >= 1:
        step = int(step)
        step_inv = 1. / step
        mean_bin = np.zeros((int(round(dlat // step)),
                             int(round(dlon // step))))
    else:
        step_inv = int(round(1 / step))
        mean_bin = np.zeros((int(round(dlat * step_inv)),
                             int(round(dlon * step_inv))))
    # NB: the original also floor-divided/rescaled lat and lon here, which
    # double-counted the step (and divided by zero for step >= 1) when the
    # bin indices are computed below.
    count = mean_bin.copy().astype(int)

    N = len(param)
    for i in range(N):
        loc = (int(round((lat[i] - lat_bounds[0]) * step_inv)),
               int(round((lon[i] - lon_bounds[0]) * step_inv)))
        count[loc] += 1
        mean_bin[loc] += param[i]
    mean_bin = mean_bin / count

    from astropy.convolution import convolve_fft, Box2DKernel
    conv_kwargs = {"boundary": 'wrap', "normalize_kernel": True,
                   "interpolate_nan": True}
    conv_kwargs.update(kwargs)

    D = 86400  # points in 1 day
    if kernel_w < 1:
        if N < 7 * D:
            kernel_w = np.ceil(5 * max(1, step_inv))
        else:
            kernel_w = np.ceil(max(5, step_inv))
    msg = "\n\t".join(["\n\tStep size: {} (1/step if less than 1: {})"
                       .format(step, step_inv),
                       "Number of points: {}".format(N),
                       "Kernel width: {}".format(kernel_w)])
    auxiliary.logger.debug(msg)

    g = Box2DKernel(kernel_w)
    conv = convolve_fft(mean_bin, g, **conv_kwargs)
    nans = np.isnan(mean_bin)
    mean_bin[nans] = conv[nans]

    lat_grid = np.arange(-90 + 0.5 * step, 90, step)
    lon_grid = np.arange(-180 + 0.5 * step, 180, step)
    return lat_grid, lon_grid, mean_bin

def inspectsum(sim, convolved=True):
    # start from a copy so sim.HSCimage['g'] is not mutated in place
    summed_image = sim.HSCimage['g'].copy()
    for b in sim.bands:
        if convolved:
            pshape = (np.array(sim.psf[b].shape) /
                      sim.oversampling).astype('int32')
            psf = make_galaxies.rebin(sim.psf[b], pshape[0], pshape[1])
            summed_image += convolve_fft(sim.HSCimage[b], psf)
        else:
            summed_image += sim.HSCimage[b]
    return summed_image

def _gsmooth_spectrum(args):
    """
    HELPER FUNCTION: private!
    Smooth a spectrum with a gaussian in 1d
    """
    spec, kernel, use_fft, kwargs = args

    if use_fft:
        return convolve_fft(spec, kernel, normalize_kernel=True, **kwargs)
    else:
        return convolve(spec, kernel, normalize_kernel=True, **kwargs)

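# Usage sketch for the helper above: it takes a single packed argument tuple
# (spectrum, kernel, use_fft, extra kwargs), the usual shape for map()-style
# parallel calls. Values are illustrative; assumes the module imports
# astropy.convolution's convolve/convolve_fft as the helper requires.
import numpy as np
from astropy.convolution import Gaussian1DKernel

spec = np.random.randn(512)
smoothed = _gsmooth_spectrum((spec, Gaussian1DKernel(3), True, {}))
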
def corr2d(img1, img2, max_movement=12):
    """
    Calculate translation between image 1 and image 2.

    Using fft based cross-correlation the best match between image1 and
    image2 is found. Image 1 and image 2 must be the same size. If not, use
    get_overlap_images beforehand.

    :param img1: np.array of uint8, image 1.
    :param img2: np.array of uint8, image 2.
    :param max_movement: integer, max translation to look for.
    :return: A list of translations to apply to image 2 for it to overlap
        with image 1.
    """
    # Calculate fft based 2d cross-correlation
    xcorr2d = convolve_fft(img1, img2[::-1, ::-1], 'wrap')

    # Calculate image midpoints (integer division, so they can be used
    # as indices)
    mid_points = (np.array(np.shape(xcorr2d)) - 1) // 2

    # Crop out 25x25 pixels around midpoint of xcorr result
    xcorr2d_crop = xcorr2d[mid_points[0] - max_movement:
                           mid_points[0] + max_movement + 1,
                           mid_points[1] - max_movement:
                           mid_points[1] + max_movement + 1]

    # Get maximum indexes and recalculate to full image coordinates
    i_idx, j_idx = np.unravel_index(xcorr2d_crop.argmax(), xcorr2d_crop.shape)
    i_idx = i_idx + mid_points[0] - max_movement
    j_idx = j_idx + mid_points[1] - max_movement

    n_fit = 3
    delta_ij = np.array([0, 0])
    if i_idx >= n_fit and j_idx >= n_fit and \
            i_idx + n_fit < np.size(xcorr2d, 0) and \
            j_idx + n_fit < np.size(xcorr2d, 1):
        # Fit data to p(0) + p(1)*x + p(2)*y + p(3)*x^2 + p(4)*xy + p(5)*y^2
        # and get top point
        fit_data = np.log(xcorr2d[i_idx - n_fit:i_idx + n_fit + 1,
                                  j_idx - n_fit:j_idx + n_fit + 1])
        idx_j, idx_i = np.meshgrid(np.arange(-n_fit, n_fit + 1),
                                   np.arange(-n_fit, n_fit + 1))
        idx_j = idx_j.flatten()
        idx_i = idx_i.flatten()
        p = np.linalg.lstsq(
            np.array([np.ones(np.shape(idx_i)), idx_i, idx_j,
                      idx_i**2, idx_i * idx_j, idx_j**2]).T,
            fit_data.flatten())[0]
        delta_ij = np.array([2 * p[2] * p[3] - p[1] * p[4],
                             2 * p[5] * p[1] - p[2] * p[4]]) / \
            (p[4]**2 - 4 * p[5] * p[3])

        # Sanity check
        if np.any(np.abs(delta_ij) > 1.5):
            delta_ij = np.array([0, 0])

    # Recalculate midpoint and make it relative
    return delta_ij + np.array([i_idx, j_idx]) - mid_points

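# Usage sketch for corr2d(): recover a known cyclic shift between two noise
# images (values illustrative; assumes module-level numpy and convolve_fft
# imports as used above). The returned (row, col) translation should reflect
# the applied (3, -2) offset, up to the function's sign/center convention.
rng = np.random.RandomState(0)
img_a = rng.rand(64, 64)
img_b = np.roll(np.roll(img_a, 3, axis=0), -2, axis=1)
shift_ij = corr2d(img_a, img_b, max_movement=12)
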
def _gsmooth_img(args):
    """
    HELPER FUNCTION: private!
    Smooth an image with a gaussian in 2d
    """
    img, kernel, use_fft, kwargs = args

    if use_fft:
        return convolve_fft(img, kernel, normalize_kernel=True, **kwargs)
    else:
        return convolve(img, kernel, normalize_kernel=True, **kwargs)

def test_filter_1D(std):
    data = np.random.rand(4, 4)
    ds = xr.DataArray(data, dims=["time", "something"])
    ds_filt = filter_1D(ds, std)
    for xi, xx in enumerate(ds.something.data):
        sample = ds_filt.sel({"something": xx})
        kernel = Gaussian1DKernel(std)
        expected = convolve_fft(data[:, xi], kernel, boundary="wrap")
        # result[np.isnan(raw_data)] = np.nan
        np.testing.assert_allclose(sample, expected)

def test_filter_2D(radius):
    data = np.random.rand(4, 3, 6)
    ds = xr.DataArray(data, dims=["x", "something", "y"])
    ds_filt = filter_2D(ds, radius, dim=["x", "y"])
    for xi, xx in enumerate(ds.something.data):
        sample = ds_filt.sel({"something": xx})
        kernel = Gaussian2DKernel(radius)
        expected = convolve_fft(data[:, xi, :], kernel, boundary="wrap")
        # result[np.isnan(raw_data)] = np.nan
        np.testing.assert_allclose(sample, expected)

def convolve_img(img, size, sizey, rot):
    size = int(size)
    if not sizey:
        sizey = size
    else:
        sizey = int(sizey)

    x, y = scipy.mgrid[-size:size + 1, -sizey:sizey + 1]
    g = scipy.exp(-(x**2 / float(size) + y**2 / float(sizey)))
    g = scipy.ndimage.interpolation.rotate(g, rot)

    z = convolve_fft(img, g)
    return z

def do_convolutions(self, allow_huge=False):
    for i, lag in enumerate(self.lags.value):
        core = core_kernel(lag, self.data.shape[0], self.data.shape[1])
        annulus = annulus_kernel(lag, self.diam_ratio,
                                 self.data.shape[0], self.data.shape[1])

        # Extend to avoid boundary effects from non-periodicity
        pad_weights = np.pad(self.weights, int(lag), padwithzeros)
        pad_img = np.pad(self.data, int(lag), padwithzeros) * pad_weights

        img_core = convolve_fft(pad_img, core,
                                normalize_kernel=True,
                                interpolate_nan=self.nanflag,
                                ignore_edge_zeros=True,
                                allow_huge=allow_huge)
        img_annulus = convolve_fft(pad_img, annulus,
                                   normalize_kernel=True,
                                   interpolate_nan=self.nanflag,
                                   ignore_edge_zeros=True,
                                   allow_huge=allow_huge)
        weights_core = convolve_fft(pad_weights, core,
                                    normalize_kernel=True,
                                    interpolate_nan=self.nanflag,
                                    ignore_edge_zeros=True,
                                    allow_huge=allow_huge)
        weights_annulus = convolve_fft(pad_weights, annulus,
                                       normalize_kernel=True,
                                       interpolate_nan=self.nanflag,
                                       ignore_edge_zeros=True,
                                       allow_huge=allow_huge)

        weights_core[np.where(weights_core == 0)] = np.NaN
        weights_annulus[np.where(weights_annulus == 0)] = np.NaN

        self.convolved_arrays.append((img_core / weights_core) -
                                     (img_annulus / weights_annulus))
        self.convolved_weights.append(weights_core * weights_annulus)

def find_2MASS_flux(array):
    # Identify which pixels have acceptable "background" levels. Start by
    # grabbing the image statistics
    mean, median, stddev = sigma_clipped_stats(array)

    # Identify pixels more than 2-sigma above the background
    fgdThresh = median + 2.0 * stddev
    fgdRegion = array > fgdThresh

    # Repeat the classification without the *definitely* nebular pixels
    bkgPix = np.logical_not(fgdRegion)
    mean, median, stddev = sigma_clipped_stats(array[bkgPix])
    fgdThresh = median + 2.0 * stddev
    fgdRegion = array > fgdThresh

    # Clean the foreground ID region
    all_labels = measure.label(fgdRegion)
    all_labels1 = morphology.remove_small_objects(all_labels, min_size=50)
    fgdRegion = all_labels1 > 0

    # Dilate a TON to be conservative...
    sigma = 20.0 * gaussian_fwhm_to_sigma  # FWHM = 3.0

    # Build a kernel for detecting pixels above the threshold
    kernel = Gaussian2DKernel(sigma, x_size=41, y_size=41)
    kernel.normalize()
    fgdRegion = convolve_fft(fgdRegion.astype(float), kernel.array)
    fgdRegion = (fgdRegion > 0.01)

    # Expand a second time to be conservative
    fgdRegion = convolve_fft(fgdRegion.astype(float), kernel.array)
    fgdRegion = (fgdRegion > 0.01)

    # Return the flux-bright pixels to the user
    return fgdRegion

def frame_filter_lowpass(array, mode='gauss', median_size=5, fwhm_size=5,
                         gauss_mode='conv'):
    """ Low-pass filtering of input frame depending on parameter ``mode``.

    Parameters
    ----------
    array : numpy ndarray
        Input array, 2d frame.
    mode : {'median', 'gauss'}, str optional
        Type of low-pass filtering.
    median_size : int, optional
        Size of the median box for filtering the low-pass median filter.
    fwhm_size : int, optional
        Size of the Gaussian kernel for the low-pass Gaussian filter.
    gauss_mode : {'conv', 'convfft'}, str optional
        'conv' uses the multidimensional gaussian filter from scipy.ndimage
        and 'convfft' uses the fft convolution with a 2d Gaussian kernel.

    Returns
    -------
    filtered : numpy ndarray
        Low-pass filtered image.
    """
    if array.ndim != 2:
        raise TypeError('Input array is not a frame or 2d array.')
    if not isinstance(median_size, int):
        raise ValueError('`median_size` must be integer')

    if mode == 'median':
        # creating the low_pass filtered (median) image
        filtered = median_filter(array, median_size, mode='nearest')
    elif mode == 'gauss':
        # 2d Gaussian filter
        sigma = fwhm_size * gaussian_fwhm_to_sigma
        if gauss_mode == 'conv':
            filtered = gaussian_filter(array, sigma=sigma, order=0,
                                       mode='nearest')
        elif gauss_mode == 'convfft':
            # FFT Convolution with a 2d gaussian kernel created with Astropy.
            filtered = convolve_fft(array, Gaussian2DKernel(sigma))
        else:
            raise TypeError('2d Gaussian filter mode not recognized')
    else:
        raise TypeError('Low-pass filter mode not recognized')

    return filtered

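# Usage sketch for frame_filter_lowpass() (illustrative values; assumes the
# module imports the function relies on: scipy.ndimage's median_filter /
# gaussian_filter, astropy.stats' gaussian_fwhm_to_sigma, and
# astropy.convolution's Gaussian2DKernel / convolve_fft).
import numpy as np

frame = np.random.randn(128, 128)
smooth_frame = frame_filter_lowpass(frame, mode='gauss', fwhm_size=5,
                                    gauss_mode='convfft')
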
def filter_wrapper(i):
    n_im, length, width = cum.shape

    if np.mod(i, 10) == 0:
        print("  {0:3}/{1:3}th image...".format(i, n_im), flush=True)

    ### Second, HP in time
    if filtwidth_yr == 0.0:
        cum_hpt = cum[i, :, :]  ## No temporal filter
    else:
        time_diff_sq = (dt_cum[i] - dt_cum)**2

        ## Limit reading data within filtwidth_yr*8
        ixs = time_diff_sq < filtwidth_yr * 8

        weight_factor = np.tile(
            np.exp(-time_diff_sq[ixs] / 2 / filtwidth_yr**2)
            [:, np.newaxis, np.newaxis],
            (1, length, width))  # len(ixs), length, width

        ## Take into account nan in cum
        weight_factor = weight_factor * (~np.isnan(cum[ixs, :, :]))

        ## Normalize weight
        with warnings.catch_warnings():  ## To silence warning by zero division
            warnings.simplefilter('ignore', RuntimeWarning)
            weight_factor = weight_factor / np.sum(weight_factor, axis=0)

        cum_lpt = np.nansum(cum[ixs, :, :] * weight_factor, axis=0)

        cum_hpt = cum[i, :, :] - cum_lpt

    ### Third, LP in space and subtract from original
    if filtwidth_km == 0.0:
        _cum_filt = cum[i, :, :]  ## No spatial
    else:
        with warnings.catch_warnings():  ## To silence warning
            if i == 0:
                ## To distinguish from 0 of filtered nodata
                cum_hpt = cum_hpt + sys.float_info.epsilon
            # warnings.simplefilter('ignore', FutureWarning)
            warnings.simplefilter('ignore', RuntimeWarning)
            kernel = Gaussian2DKernel(x_stddev, y_stddev)
            ## fill edge 0 for interpolation
            cum_hptlps = convolve_fft(cum_hpt * mask, kernel,
                                      fill_value=np.nan, allow_huge=True)
            cum_hptlps[cum_hptlps == 0] = np.nan  ## fill 0 with nan

        _cum_filt = cum[i, :, :] - cum_hptlps

    ### Output comparison image
    data3 = [np.angle(np.exp(1j * (data / coef_r2m / cycle)) * cycle)
             for data in [cum[i, :, :] * mask, cum_hptlps * mask,
                          _cum_filt * mask]]
    title3 = ['Before filter ({}pi/cycle)'.format(cycle * 2),
              'Filter phase ({}pi/cycle)'.format(cycle * 2),
              'After filter ({}pi/cycle)'.format(cycle * 2)]
    pngfile = os.path.join(filtcumdir, imdates[i] + '_filt.png')
    plot_lib.make_3im_png(data3, pngfile, cmap_wrap, title3,
                          vmin=-np.pi, vmax=np.pi, cbar=False)

    return _cum_filt

def _detect_specimens(self, reconstructed_wave, propagation_distance,
                      margin=100, kernel_radius=4.0, save_png_to_disk=None):
    cropped_img = reconstructed_wave.phase[margin:-margin, margin:-margin]
    best_convolved_phase = convolve_fft(cropped_img,
                                        MexicanHat2DKernel(kernel_radius))

    best_convolved_phase_copy = best_convolved_phase.copy(order='C')

    # Find positive peaks
    blob_doh_kwargs = dict(threshold=0.00007, min_sigma=2, max_sigma=10)
    blobs = blob_doh(best_convolved_phase_copy, **blob_doh_kwargs)

    # Find negative peaks
    negative_phase = -best_convolved_phase_copy
    negative_phase += (np.median(best_convolved_phase_copy) -
                       np.median(negative_phase))
    negative_blobs = blob_doh(negative_phase, **blob_doh_kwargs)

    all_blobs = []
    for blob in blobs:
        if blob.size > 0:
            all_blobs.append(blob)

    for neg_blob in negative_blobs:
        if neg_blob.size > 0:
            all_blobs.append(neg_blob)

    if len(all_blobs) > 0:
        all_blobs = np.vstack(all_blobs)

    # If save pngs:
    if save_png_to_disk is not None:
        path = "{0}/{1:.4f}.png".format(save_png_to_disk,
                                        propagation_distance)
        save_scaled_image(reconstructed_wave.phase, path, margin, all_blobs)

    # Blobs get returned in rows with [x, y, radius], so save each
    # set of blobs with the propagation distance to record z

    # correct blob positions for margin:
    all_blobs = np.float64(all_blobs)
    if len(all_blobs) > 0:
        all_blobs[:, 0] += margin
        all_blobs[:, 1] += margin
        all_blobs[:, 2] = propagation_distance
        return all_blobs
    else:
        return None

def locate_peaks(self, sigma_level, kernel=None):
    """
    This function ...

    :param sigma_level:
    :param kernel:
    :return:
    """
    # If a subtracted box is present, use it to locate the peaks
    box = self.subtracted if self.has_background else self.cutout

    # Calculate the sigma-clipped statistics of the box
    mean, median, stddev = statistics.sigma_clipped_statistics(
        box, sigma=3.0, mask=self.background_mask.data)
    # Sigma 3.0 for clipping is what photutils uses in detect_threshold
    #sigma_level = 1.5  # I once tried to investigate why some clear peaks
    # were not detected, did not have time ..
    threshold = median + (sigma_level * stddev)

    # Convolve the box with the given kernel, if any
    if kernel is not None:
        box = convolve_fft(box, kernel, normalize_kernel=True)

    # Find peaks
    #threshold = detect_threshold(box, snr=2.0)
    # other method (snr corresponds to sigma_level as I use it above)
    peaks = find_peaks(box, threshold, box_size=5, mask=self.background_mask)

    # For some reason, once in a while, an ordinary list comes out of the
    # find_peaks routine instead of an Astropy Table instance. We assume we
    # need an empty table in this case
    if type(peaks) is list:
        peaks = Table([[], []], names=('x_peak', 'y_peak'))

    # Initialize a list to contain the peak positions
    positions = []

    # Loop over the peaks
    for peak in peaks:
        # Calculate the absolute x and y coordinate of the peak
        x_rel = peak['x_peak']
        y_rel = peak['y_peak']
        x = x_rel + self.cutout.x_min
        y = y_rel + self.cutout.y_min

        # Check whether the peak position falls in the box and then add it
        # to the list
        peak_position = PixelCoordinate(x, y)
        if self.cutout.contains(peak_position):
            # Add the coordinates to the positions list
            positions.append(peak_position)
        else:
            print("DEBUG: peak position", peak_position,
                  "falls outside of box with shape", box.shape)

    # If exactly one peak was found, set the self.peak attribute accordingly
    if len(positions) == 1:
        self.peak = positions[0]

    # Return the list of peak positions
    return positions

def simulated_cluster(n_stars=10000, dimensions=(512, 512)):
    """
    Generates an image simulating a cluster of stars, including
    a Gaussian filter and background noise.

    Parameters
    ----------
    n_stars : `int`
        A positive integer giving the number of visible stars in the image
        (default: 10000).

    dimensions : `tuple`
        A two-tuple of positive integers specifying the dimensions (in
        pixels) of the output image (default: 512x512).

    Returns
    -------
    array : `~numpy.ndarray`
        A 2D Numpy array containing the pixels of the generated image.
    """
    nx, ny = dimensions

    # Create empty image
    image = np.zeros((ny, nx))

    # Generate random positions
    r = np.random.random(n_stars) * nx
    theta = np.random.uniform(0., 2. * np.pi, n_stars)

    # Generate random fluxes
    fluxes = np.random.random(n_stars) ** 2

    # Compute position
    x = nx / 2 + r * np.cos(theta)
    y = ny / 2 + r * np.sin(theta)

    # Add stars to image
    # ==> First for loop and if statement <==
    for idx in range(n_stars):
        if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:
            # positions must be cast to ints before indexing
            image[int(y[idx]), int(x[idx])] += fluxes[idx]

    # Convolve with a gaussian
    kernel = Gaussian2DKernel(1)
    image = convolve_fft(image, kernel)

    # Add noise
    image += np.random.normal(1., 0.001, image.shape)

    return image

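# Usage sketch for simulated_cluster() (sizes illustrative; assumes the
# module-level numpy and astropy.convolution imports used above).
cluster_img = simulated_cluster(n_stars=2000, dimensions=(256, 256))
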
def match_resolution(image, f1, f2, kernel=None, data1_res=None):
    import time

    ### grab kernel
    if kernel is None:
        kernel, resolution = psf_match(f1, f2, data1_res=data1_res)

    t1 = time.time()
    convolved_image = convolve_fft(image, kernel, interpolate_nan='fill')
    d1 = time.time() - t1
    print('convolution took {0}s'.format(d1))

    return convolved_image, kernel

def _smooth_filter_array(array, kernel_shape, kernel_type='nuttall',
                         gaussian_cutoff=3.0):
    kernel_func_dict = {'nuttall': nuttall,
                        'blackmanharris': blackmanharris,
                        'hanning': hanning,
                        'gaussian': gaussian}
    func = kernel_func_dict[kernel_type]

    if kernel_type == 'gaussian':
        std = np.array(kernel_shape) / gaussian_cutoff
        k = func(kernel_shape[0], std[0])[:, None, None] * \
            func(kernel_shape[1], std[1])[None, :, None] * \
            func(kernel_shape[2], std[2])[None, None, :]
    else:
        k = func(kernel_shape[0])[:, None, None] * \
            func(kernel_shape[1])[None, :, None] * \
            func(kernel_shape[2])[None, None, :]

    out = convolve_fft(array, k, boundary='wrap', allow_huge=True)
    return out

def test_MVC_beamcorrect():
    imsize = 128
    theta = 0
    plaw = 3.0
    ellip = 1.0

    beam = Beam(30 * u.arcsec)

    plane = make_extended(imsize, powerlaw=plaw, ellip=ellip,
                          theta=theta, return_fft=False)
    plane = convolve_fft(plane, beam.as_kernel(10 * u.arcsec),
                         boundary='wrap')

    # Generate a header
    hdu = fits.PrimaryHDU(plane)

    hdu.header['CDELT1'] = (10 * u.arcsec).to(u.deg).value
    hdu.header['CDELT2'] = - (10 * u.arcsec).to(u.deg).value
    hdu.header['BMAJ'] = beam.major.to(u.deg).value
    hdu.header['BMIN'] = beam.major.to(u.deg).value
    hdu.header['BPA'] = 0.0
    hdu.header['CRPIX1'] = imsize / 2.
    hdu.header['CRPIX2'] = imsize / 2.
    hdu.header['CRVAL1'] = 0.0
    hdu.header['CRVAL2'] = 0.0
    hdu.header['CTYPE1'] = 'GLON-CAR'
    hdu.header['CTYPE2'] = 'GLAT-CAR'
    hdu.header['CUNIT1'] = 'deg'
    hdu.header['CUNIT2'] = 'deg'

    hdu.header.update(beam.to_header_keywords())

    ones = np.ones_like(plane)

    test = MVC(hdu, fits.PrimaryHDU(ones), fits.PrimaryHDU(ones))
    test.run(beam_correct=True,
             low_cut=10**-1.5 / u.pix, high_cut=1 / (6 * u.pix),
             fit_2D=False)

    npt.assert_allclose(-plaw, test.slope, rtol=0.02)

def convolve(lopsf, hipsf, outker, threshold, highresimg, outname,
             comp_kernel):
    """Match the higher-res psf to the lower-res psf, then convolve the
    high-res image with the resulting kernel.

    ### note for optical and UV, a threshold of 0.14 was found to be ideal.
    ### for two IR ones, a threshold of 0.03 seemed to work better.
    """
    if comp_kernel is True:
        # compute the psf matching function and save it to outker
        psfmatch(hipsf, lopsf, hipsf, outker, convolution="psf",
                 background="none", threshold=threshold)

    # read in kernel
    k = fits.getdata(outker)

    # read in high res image to be convolved
    im, hdr = fits.getdata(highresimg, header=True)

    # convolve highresimg with the psf matching function in outker
    # to produce output image
    convimg = convolve_fft(im, k)
    fits.writeto(outname, convimg, header=hdr, clobber=True)

def test_VCA_beamcorrect():
    imsize = 128
    theta = 0
    plaw = 3.0
    ellip = 1.0

    beam = Beam(30 * u.arcsec)

    nchans = 10
    # Generate a red noise model cube
    cube = np.empty((nchans, imsize, imsize))
    for i in range(nchans):
        plane = make_extended(imsize, powerlaw=plaw, ellip=ellip,
                              theta=theta, return_fft=False)
        cube[i] = convolve_fft(plane, beam.as_kernel(10 * u.arcsec),
                               boundary='wrap')

    # Generate a header
    hdu = fits.PrimaryHDU(cube)

    hdu.header['CDELT1'] = (10 * u.arcsec).to(u.deg).value
    hdu.header['CDELT2'] = - (10 * u.arcsec).to(u.deg).value
    hdu.header['BMAJ'] = beam.major.to(u.deg).value
    hdu.header['BMIN'] = beam.major.to(u.deg).value
    hdu.header['BPA'] = 0.0
    hdu.header['CRPIX1'] = imsize / 2.
    hdu.header['CRPIX2'] = imsize / 2.
    hdu.header['CRVAL1'] = 0.0
    hdu.header['CRVAL2'] = 0.0
    hdu.header['CTYPE1'] = 'GLON-CAR'
    hdu.header['CTYPE2'] = 'GLAT-CAR'
    hdu.header['CUNIT1'] = 'deg'
    hdu.header['CUNIT2'] = 'deg'

    hdu.header.update(beam.to_header_keywords())

    test = VCA(hdu)
    test.run(beam_correct=True, high_cut=1 / (6 * u.pix), fit_2D=False)

    npt.assert_allclose(-plaw, test.slope, rtol=0.02)

def get_noise_cube(self):
    """
    Generates a data cube of randomly generated noise with properties
    matching the measured distribution.
    """
    noise = np.random.randn(*self.cube.shape)
    if self.beam is not None:
        if self.astropy_beam_flag:
            beam = self.beam
        else:
            beam = self.beam.as_kernel(get_pixel_scales(self.cube.wcs))
        # Iterate convolution over plane (ugh)
        for plane in np.arange(self.cube.shape[0]):
            noise[plane, :, :] = convolve_fft(noise[plane, :, :], beam,
                                              normalize_kernel=True)
    self._noise_cube = noise * self.scale_cube
    return self

def get_active_region_map(path, image_file):
    kernal_std = 10
    gauss = astropy.convolution.Gaussian2DKernel(kernal_std)

    hdulist = fits.open(path + image_file)
    hdu = hdulist[0]  # <-- NEED CASE CONDITIONAL FOR MDI/HMI

    #image_date = hdu.header["DATE-OBS"].replace("/", " ")
    #image_time = hdu.header["TIME-OBS"].rstrip(".").partition(".")[0]

    clean_data = preprocess_image(hdu)
    data_abs = np.abs(clean_data)

    if hdu.header["INSTRUME"] == "MDI":
        k = 130  # <-- use this value, confirmed by David
    else:
        k = 130  # TODO --> NEED TO FIND TRUE K VALUE FOR HMI DATA !!!

    # smooth data with Fast Fourier Transform
    smoothing = convolve_fft(data_abs, gauss)
    smoothing[smoothing < k] = 0.
    smoothing[smoothing >= k] = 1

    # filter original image with active region map
    return np.where(smoothing != 0.0, clean_data, smoothing), hdu