def test_subgrid_rebin():
    """
    Checks that a kernel interpolated to subgrid resolution with
    kernel_util.subgrid_kernel and then re-binned to the pixel grid stays close
    to the original (normalized) kernel.
    """
    kernel_size = 11
    subgrid_res = 3
    sigma = 1
    from lenstronomy.LightModel.Profiles.gaussian import Gaussian
    gaussian = Gaussian()
    # build a Gaussian flux map on a subgrid-resolution grid
    x_grid, y_grid = Util.make_grid(kernel_size, 1. / subgrid_res, subgrid_res)
    flux = gaussian.function(x_grid, y_grid, amp=1, sigma_x=sigma, sigma_y=sigma)
    kernel = Util.array2image(flux)
    # removed leftover debug print of the kernel shape
    kernel = util.averaging(kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size)
    kernel = kernel_util.kernel_norm(kernel)
    subgrid_kernel = kernel_util.subgrid_kernel(kernel, subgrid_res=subgrid_res, odd=True)
    kernel_pixel = util.averaging(subgrid_kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size)
    kernel_pixel = kernel_util.kernel_norm(kernel_pixel)
    assert np.sum((kernel_pixel - kernel) ** 2) < 0.1
def test_deshift_subgrid():
    # test the de-shifting with a sharpened subgrid kernel
    kernel_size = 5
    subgrid = 3
    fwhm = 1
    kernel_subgrid_size = kernel_size * subgrid
    kernel_subgrid = np.zeros((kernel_subgrid_size, kernel_subgrid_size))
    # NOTE(review): this delta-spike assignment is dead code — the array is
    # overwritten by the Gaussian kernel on the next line
    kernel_subgrid[7, 7] = 2
    kernel_subgrid = kernel_util.kernel_gaussian(kernel_subgrid_size, 1. / subgrid, fwhm=fwhm)
    # degrade the subgrid kernel to the pixel grid
    kernel = util.averaging(kernel_subgrid, kernel_subgrid_size, kernel_size)
    # sub-pixel shift applied in units of image pixels
    shift_x = 0.18
    shift_y = 0.2
    shift_x_subgird = shift_x * subgrid
    shift_y_subgrid = shift_y * subgrid
    # shift the high-resolution kernel with linear interpolation (order=1)
    kernel_shifted_subgrid = interp.shift(kernel_subgrid, [-shift_y_subgrid, -shift_x_subgird], order=1)
    kernel_shifted = util.averaging(kernel_shifted_subgrid, kernel_subgrid_size, kernel_size)
    kernel_shifted_highres = kernel_util.subgrid_kernel(kernel_shifted, subgrid_res=subgrid, num_iter=1)
    # NOTE(review): the triple-quote below opens a commented-out block whose
    # continuation (and the test's assertions) is truncated in this view —
    # confirm against the full file before editing further.
    """
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100):
    """
    Creates a higher resolution kernel with subgrid resolution as an
    interpolation of the original kernel in an iterative approach.

    :param kernel: initial kernel (2d array)
    :param subgrid_res: subgrid resolution required (int)
    :param odd: if True, forces the subgrid kernel to have an odd number of
        pixels per axis
    :param num_iter: number of refinement iterations (at least one is run)
    :return: kernel with higher resolution (larger)
    """
    subgrid_res = int(subgrid_res)
    if subgrid_res == 1:
        return kernel
    nx, ny = np.shape(kernel)
    # pixel-center coordinates of the input grid on the unit interval
    d_x = 1. / nx
    x_in = np.linspace(d_x / 2, 1 - d_x / 2, nx)
    d_y = 1. / ny  # bugfix: was 1. / nx, which is wrong for non-square kernels
    y_in = np.linspace(d_y / 2, 1 - d_y / 2, ny)
    nx_new = nx * subgrid_res
    ny_new = ny * subgrid_res
    if odd is True:
        if nx_new % 2 == 0:
            nx_new -= 1
        if ny_new % 2 == 0:
            ny_new -= 1
    # pixel-center coordinates of the output (subgrid) grid
    d_x_new = 1. / nx_new
    d_y_new = 1. / ny_new
    x_out = np.linspace(d_x_new / 2., 1 - d_x_new / 2., nx_new)
    y_out = np.linspace(d_y_new / 2., 1 - d_y_new / 2., ny_new)
    kernel_input = copy.deepcopy(kernel)
    kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
    kernel_subgrid = kernel_norm(kernel_subgrid)
    for i in range(max(num_iter, 1)):
        # given a proposition, re-size it to original pixel size
        if subgrid_res % 2 == 0:
            kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
        else:
            kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
        # feed the residual back into the next proposition
        delta = kernel - kernel_pixel
        temp_kernel = kernel_input + delta
        kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)
        kernel_subgrid = kernel_norm(kernel_subgrid)
        kernel_input = temp_kernel
    # whatever has not been matched is added to zeroth order (in squares of the
    # undersampled PSF); only applied for odd subgrid resolutions
    if subgrid_res % 2 == 0:
        return kernel_subgrid
    kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
    kernel_pixel = kernel_norm(kernel_pixel)
    delta_kernel = kernel_pixel - kernel_norm(kernel)
    # distribute each pixel residual uniformly over its subgrid pixels
    # (renamed from `id`, which shadowed the builtin)
    ones_block = np.ones((subgrid_res, subgrid_res))
    delta_kernel_sub = np.kron(delta_kernel, ones_block) / subgrid_res ** 2
    return kernel_norm(kernel_subgrid - delta_kernel_sub)
def __init__(self, kwargs_psf): self.psf_type = kwargs_psf.get('psf_type', 'NONE') if self.psf_type == 'GAUSSIAN': self._fwhm = kwargs_psf['fwhm'] self._sigma_gaussian = util.fwhm2sigma(self._fwhm) self._truncation = kwargs_psf.get('truncation', 5 * self._fwhm) if 'pixel_size' in kwargs_psf: self._pixel_size = kwargs_psf['pixel_size'] elif self.psf_type == 'PIXEL': if 'kernel_point_source_subsampled' in kwargs_psf: self._kernel_point_source_subsampled = kwargs_psf[ 'kernel_point_source_subsampled'] n_high = len(self._kernel_point_source_subsampled) self._point_source_subsampling_factor = kwargs_psf[ 'point_source_subsampling_factor'] numPix = int(n_high / self._point_source_subsampling_factor) self._kernel_point_source = util.averaging( self._kernel_point_source_subsampled, numGrid=n_high, numPix=numPix) else: self._kernel_point_source = kwargs_psf['kernel_point_source'] if 'kernel_pixel_subsampled' in kwargs_psf: self._kernel_pixel_subsampled = kwargs_psf[ 'kernel_pixel_subsampled'] n_high = len(self._kernel_point_source_subsampled) self._pixel_subsampling_factor = kwargs_psf[ 'pixel_subsampling_factor'] numPix = int(n_high / self._pixel_subsampling_factor) self._kernel_pixel = util.averaging( self._kernel_pixel_subsampled, numGrid=n_high, numPix=numPix) else: if 'kernel_pixel' in kwargs_psf: self._kernel_pixel = kwargs_psf['kernel_pixel'] else: self._kernel_pixel = kernel_util.pixel_kernel( self._kernel_point_source, subgrid_res=1) elif self.psf_type == 'NONE': self._kernel_point_source = np.zeros((3, 3)) self._kernel_point_source[1, 1] = 1 else: raise ValueError("psf_type %s not supported!" % self.psf_type) if 'psf_error_map' in kwargs_psf: self._psf_error_map = kwargs_psf['psf_error_map'] if len(self._psf_error_map) != len(self._kernel_point_source): raise ValueError( 'psf_error_map must have same size as kernel_point_source!' )
def degrade_kernel(kernel_super, degrading_factor):
    """
    Degrades a super-sampled kernel to the pixel resolution.

    :param kernel_super: higher resolution kernel (odd number per axis)
    :param degrading_factor: degrading factor (effectively the super-sampling
        resolution of the kernel given)
    :return: degraded kernel with odd axis number with the sum of the
        flux/values in the kernel being preserved
    """
    if degrading_factor == 1:
        return kernel_super
    if degrading_factor % 2 == 0:
        return averaging_even_kernel(kernel_super, degrading_factor)
    # odd degrading factor: zero-pad the super-sampled kernel so its size is an
    # exact multiple of the factor and the low-resolution output is odd-sized
    n_super = len(kernel_super)
    num_pix_low = int(round(n_super / degrading_factor + 0.5))
    if num_pix_low % 2 == 0:
        num_pix_low += 1
    n_padded = num_pix_low * degrading_factor
    padded = np.zeros((n_padded, n_padded))
    offset = int((n_padded - n_super) / 2)
    padded[offset:offset + n_super, offset:offset + n_super] = kernel_super
    # multiplicative factor added when providing flux conservation
    return util.averaging(padded, numGrid=n_padded, numPix=num_pix_low) * degrading_factor ** 2
def kernel_average_pixel(kernel_super, supersampling_factor):
    """
    Computes the effective convolution kernel assuming a uniform surface
    brightness on the scale of a pixel.

    :param kernel_super: supersampled PSF of a point source (odd number per axis)
    :param supersampling_factor: supersampling factor (int)
    :return: effective kernel, re-normalized to the total flux of the input
    """
    total_flux = np.sum(kernel_super)
    kernel_size = int(round(len(kernel_super) / float(supersampling_factor) + 0.5))
    if kernel_size % 2 == 0:
        kernel_size += 1
    n_high = kernel_size * supersampling_factor
    if n_high % 2 == 0:
        n_high += 1
    # stack the super-sampled PSF once per sub-pixel position within one pixel
    stacked = np.zeros((n_high, n_high))
    center = (kernel_size - 1) / 2 * supersampling_factor
    for di in range(supersampling_factor):
        for dj in range(supersampling_factor):
            stacked = image_util.add_layer2image(stacked, int(center + di), int(center + dj), kernel_super)
    # degrade the stacked kernel to the pixel grid
    if supersampling_factor % 2 == 0:
        kernel_pixel = averaging_even_kernel(stacked, supersampling_factor)
    else:
        kernel_pixel = util.averaging(stacked, numGrid=n_high, numPix=kernel_size)
    kernel_pixel /= np.sum(kernel_pixel)
    return kernel_pixel * total_flux
def test_subgrid_rebin():
    """
    Checks that a kernel interpolated to subgrid resolution with
    kernel_util.subgrid_kernel and then re-binned to the pixel grid stays close
    to the original (normalized) kernel.
    """
    kernel_size = 11
    subgrid_res = 3
    sigma = 1
    # bugfix: `gaussian` was referenced below but never defined in this test;
    # instantiate the light profile as the sibling version of this test does
    from lenstronomy.LightModel.Profiles.gaussian import Gaussian
    gaussian = Gaussian()
    x_grid, y_grid = Util.make_grid(kernel_size, 1. / subgrid_res, subgrid_res)
    flux = gaussian.function(x_grid, y_grid, amp=1, sigma=sigma)
    kernel = Util.array2image(flux)
    # removed leftover debug print of the kernel shape
    kernel = util.averaging(kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size)
    kernel = kernel_util.kernel_norm(kernel)
    subgrid_kernel = kernel_util.subgrid_kernel(kernel, subgrid_res=subgrid_res, odd=True)
    kernel_pixel = util.averaging(subgrid_kernel, numGrid=kernel_size * subgrid_res, numPix=kernel_size)
    kernel_pixel = kernel_util.kernel_norm(kernel_pixel)
    assert np.sum((kernel_pixel - kernel) ** 2) < 0.1
def __init__(self, kernel_supersampled, supersampling_factor, supersampling_size=None, convolution_type='fft'):
    """
    :param kernel_supersampled: kernel in supersampled pixels
    :param supersampling_factor: supersampling factor relative to the image pixel grid
    :param supersampling_size: number of pixels (in units of the image pixels) that are
        convolved with the supersampled kernel
    :param convolution_type: convolution back-end forwarded to PixelKernelConvolution
    """
    self._supersampling_factor = supersampling_factor
    n_high = len(kernel_supersampled)
    num_pix = int(n_high / self._supersampling_factor)
    # degrade the supersampled kernel to the image pixel grid
    if self._supersampling_factor % 2 == 0:
        self._kernel = kernel_util.averaging_even_kernel(kernel_supersampled, self._supersampling_factor)
    else:
        self._kernel = util.averaging(kernel_supersampled, numGrid=n_high, numPix=num_pix)
    if supersampling_size is None:
        # everything is convolved with the supersampled kernel; the low-res
        # part is a zero kernel and is skipped at convolution time
        kernel_low_res = np.zeros_like(self._kernel)
        kernel_high_res = kernel_supersampled
        self._low_res_convolution = False
    else:
        # split into a low-resolution outskirt and a high-resolution core
        kernel_low_res, kernel_high_res = kernel_util.split_kernel(
            self._kernel, kernel_supersampled, supersampling_size, self._supersampling_factor)
        self._low_res_convolution = True
    self._low_res_conv = PixelKernelConvolution(kernel_low_res, convolution_type=convolution_type)
    self._high_res_conv = PixelKernelConvolution(kernel_high_res, convolution_type=convolution_type)
def __init__(self, kernel_super, supersampling_factor, conv_supersample_pixels, supersampling_kernel_size=None,
             compute_pixels=None, nopython=True, cache=True, parallel=False):
    """
    :param kernel_super: convolution kernel in units of super sampled pixels provided, odd length per axis
    :param supersampling_factor: factor of supersampling relative to pixel grid
    :param conv_supersample_pixels: bool array same size as data, pixels to be convolved and their light to be blurred
    :param supersampling_kernel_size: number of pixels (in units of the image pixels) that are convolved with the
        supersampled kernel
    :param compute_pixels: bool array of size of image, these pixels (if True) will get blurred light from other pixels
    :param nopython: bool, numba jit setting to use python or compiled.
    :param cache: bool, numba jit setting to use cache
    :param parallel: bool, numba jit setting to use parallel mode
    """
    #kernel_pixel = kernel_util.kernel_average_pixel(kernel_super, supersampling_factor)
    # degrade the super-sampled kernel to the image pixel grid
    n_high = len(kernel_super)
    numPix = int(n_high / supersampling_factor)
    if supersampling_factor % 2 == 0:
        kernel = kernel_util.averaging_even_kernel(kernel_super, supersampling_factor)
    else:
        kernel = util.averaging(kernel_super, numGrid=n_high, numPix=numPix)
    # NOTE(review): presumably rescales the averaged kernel to conserve the
    # total flux of kernel_super — confirm against util.averaging semantics
    kernel *= supersampling_factor**2
    # full-image convolution at pixel resolution
    self._low_res_conv = PixelKernelConvolution(kernel, convolution_type='fft')
    if supersampling_kernel_size is None:
        supersampling_kernel_size = len(kernel)
    # cut both kernels to the region treated with the partial convolutions
    kernel_cut = image_util.cut_edges(kernel, supersampling_kernel_size)
    kernel_super_cut = image_util.cut_edges(kernel_super, supersampling_kernel_size * supersampling_factor)
    # partial low-resolution convolution over the selected pixels
    self._low_res_partial = NumbaConvolution(kernel_cut, conv_supersample_pixels, compute_pixels=compute_pixels,
                                             nopython=nopython, cache=cache, parallel=parallel, memory_raise=True)
    # partial high-resolution (subgrid) convolution over the same pixels
    self._hig_res_partial = SubgridNumbaConvolution(kernel_super_cut, supersampling_factor, conv_supersample_pixels,
                                                    compute_pixels=compute_pixels, nopython=nopython, cache=cache,
                                                    parallel=parallel)  # , kernel_size=len(kernel_cut))
    self._supersampling_factor = supersampling_factor
def averaging_odd_kernel(kernel_super, degrading_factor):
    """
    Degrades a super-sampled kernel by a given factor, zero-padding the input
    so that the low-resolution output has an odd number of pixels per axis.

    :param kernel_super: super-sampled kernel (2d array)
    :param degrading_factor: integer factor by which the resolution is reduced
    :return: low-resolution kernel with an odd number of pixels per axis
    """
    n_super = len(kernel_super)
    num_pix = int(round(n_super / degrading_factor + 0.5))
    if num_pix % 2 == 0:
        num_pix += 1  # enforce an odd-sized output kernel
    n_padded = num_pix * degrading_factor
    # embed the kernel centrally in a zero-padded grid that is an exact
    # multiple of the degrading factor
    padded = np.zeros((n_padded, n_padded))
    start = int((n_padded - n_super) / 2)
    padded[start:start + n_super, start:start + n_super] = kernel_super
    return util.averaging(padded, numGrid=n_padded, numPix=num_pix)
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=10):
    """
    Creates a higher resolution kernel with subgrid resolution as an
    interpolation of the original kernel in an iterative approach.

    :param kernel: initial kernel (2d array)
    :param subgrid_res: subgrid resolution required (int)
    :param odd: if True, forces the subgrid kernel to have an odd number of
        pixels per axis
    :param num_iter: number of refinement iterations (at least one is run)
    :return: kernel with higher resolution (larger)
    """
    subgrid_res = int(subgrid_res)
    if subgrid_res == 1:
        return kernel
    nx, ny = np.shape(kernel)
    # pixel-center coordinates of the input grid on the unit interval
    d_x = 1. / nx
    x_in = np.linspace(d_x / 2, 1 - d_x / 2, nx)
    d_y = 1. / ny  # bugfix: was 1. / nx, which is wrong for non-square kernels
    y_in = np.linspace(d_y / 2, 1 - d_y / 2, ny)
    nx_new = nx * subgrid_res
    ny_new = ny * subgrid_res
    if odd is True:
        if nx_new % 2 == 0:
            nx_new -= 1
        if ny_new % 2 == 0:
            ny_new -= 1
    # pixel-center coordinates of the output (subgrid) grid
    d_x_new = 1. / nx_new
    d_y_new = 1. / ny_new
    x_out = np.linspace(d_x_new / 2., 1 - d_x_new / 2., nx_new)
    y_out = np.linspace(d_y_new / 2., 1 - d_y_new / 2., ny_new)
    kernel_input = copy.deepcopy(kernel)
    kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
    norm_subgrid = np.sum(kernel_subgrid)
    kernel_subgrid = kernel_norm(kernel_subgrid)
    for i in range(max(num_iter, 1)):
        # re-bin the current proposition to the original pixel size
        if subgrid_res % 2 == 0:
            kernel_pixel = averaging_odd_kernel(kernel_subgrid, subgrid_res)
        else:
            kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
        kernel_pixel = kernel_norm(kernel_pixel)
        # up-sample the residual and fold it back into the proposition
        delta = kernel - kernel_pixel
        delta_subgrid = image_util.re_size_array(x_in, y_in, delta, x_out, y_out) / norm_subgrid
        kernel_subgrid += delta_subgrid
        kernel_subgrid = kernel_norm(kernel_subgrid)
    return kernel_subgrid
def pixel_kernel(point_source_kernel, subgrid_res=7):
    """
    Converts a pixelised kernel of a point source to a kernel representing a
    uniform extended pixel.

    :param point_source_kernel: pixelised point-source kernel (2d array)
    :param subgrid_res: subgrid resolution of the intermediate supersampling
    :return: convolution kernel for an extended pixel
    """
    kernel_subgrid = subgrid_kernel(point_source_kernel, subgrid_res, num_iter=10)
    kernel_size = len(point_source_kernel)
    n_super = kernel_size * subgrid_res
    # stack the supersampled kernel at every subgrid position within one pixel
    stacked = np.zeros((n_super, n_super))
    center = (kernel_size - 1) / 2 * subgrid_res
    for offset_x in range(subgrid_res):
        for offset_y in range(subgrid_res):
            stacked = image_util.add_layer2image(
                stacked, int(center + offset_x), int(center + offset_y), kernel_subgrid)
    # re-bin the stack to the pixel grid and normalize
    kernel_pixel = util.averaging(stacked, numGrid=n_super, numPix=kernel_size)
    return kernel_norm(kernel_pixel)