def test_convert_image_to_kernel(self):
    m31image = create_test_image(cellsize=0.001, frequency=[1e8], canonical=True)
    screen = create_w_term_like(m31image, w=20000.0, remove_shift=True)
    screen_fft = fft_image(screen)
    # With oversampling 8 and kernelwidth 8 on a single-channel, single-polarisation
    # image, the kernel has shape (nchan, npol, 8, 8, 8, 8)
    converted = convert_image_to_kernel(screen_fft, 8, 8)
    assert converted.shape == (1, 1, 8, 8, 8, 8)
    # Invalid argument combinations should be rejected with an AssertionError
    with self.assertRaises(AssertionError):
        converted = convert_image_to_kernel(m31image, 15, 1)
    with self.assertRaises(AssertionError):
        converted = convert_image_to_kernel(m31image, 15, 1000)
def w_kernel_list(vis: Visibility, im: Image, oversampling=1, wstep=50.0, kernelwidth=16, **kwargs):
    """Calculate w convolution kernels

    Uses create_w_term_like to calculate the w screen. This is exactly what w-stacking does.

    Returns (indices to the w kernel for each row, kernels).

    Each kernel has axes [centre_v, centre_u, offset_v, offset_u]. We currently use the same
    convolution function for all channels and polarisations. Changing that behaviour would
    require modest changes here and to the gridding/degridding routines.

    :param vis: Visibility
    :param im: Template image (padding, if any, occurs before this)
    :param oversampling: Oversampling factor
    :param wstep: Step in w between cached functions
    :param kernelwidth: Width of each kernel in pixels
    :return: (indices to the w kernel for each row, kernels)
    """
    nchan, npol, ny, nx = im.shape
    gcf, _ = anti_aliasing_calculate((ny, nx))

    assert oversampling % 2 == 0 or oversampling == 1, "oversampling must be unity or even"
    assert kernelwidth % 2 == 0, "kernelwidth must be even"

    wmaxabs = numpy.max(numpy.abs(vis.w))
    log.debug("w_kernel_list: Maximum absolute w = %.1f, step is %.1f wavelengths" % (wmaxabs, wstep))

    def digitise(w, wstep):
        return numpy.ceil((w + wmaxabs) / wstep).astype('int')

    # Find all the unique indices for which we need a kernel
    nwsteps = digitise(wmaxabs, wstep) + 1
    w_list = numpy.linspace(-wmaxabs, +wmaxabs, nwsteps)
    log.debug("w_kernel_list: using %d w planes" % nwsteps)

    wtemplate = copy_image(im)
    wtemplate.data = numpy.zeros(wtemplate.shape, dtype=im.data.dtype)

    padded_shape = list(wtemplate.shape)
    padded_shape[3] *= oversampling
    padded_shape[2] *= oversampling

    # For all the unique indices, calculate the corresponding w kernel
    kernels = list()
    for w in w_list:
        # Make a w screen
        wscreen = create_w_term_like(wtemplate, w, vis.phasecentre, **kwargs)
        wscreen.data /= gcf
        assert numpy.max(numpy.abs(wscreen.data)) > 0.0, 'w screen is empty'
        wscreen_padded = pad_image(wscreen, padded_shape)

        wconv = fft_image(wscreen_padded)
        wconv.data *= float(oversampling) ** 2
        # For the moment, ignore the polarisation and channel axes
        kernels.append(convert_image_to_kernel(wconv, oversampling, kernelwidth).data[0, 0, ...])

    # Now make a lookup table from row number of vis to the kernel
    kernel_indices = digitise(vis.w, wstep)
    assert numpy.max(kernel_indices) < len(kernels), "wabsmax %f wstep %f" % (wmaxabs, wstep)
    assert numpy.min(kernel_indices) >= 0, "wabsmax %f wstep %f" % (wmaxabs, wstep)

    return kernel_indices, kernels
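# ---------------------------------------------------------------------------
# Usage sketch (not part of the library): a hypothetical helper showing how the
# (kernel_indices, kernels) pair returned by w_kernel_list is meant to be used.
# It assumes `vis` is an ARL Visibility and `model` a template Image prepared
# elsewhere; the function name and parameter values are illustrative only.
def example_lookup_w_kernel(vis: Visibility, model: Image, row=0):
    kernel_indices, kernels = w_kernel_list(vis, model, oversampling=2, wstep=50.0,
                                            kernelwidth=16)
    # Each visibility row maps to one cached kernel through its digitised w value,
    # index = ceil((w + wmaxabs) / wstep), so index 0 corresponds to w close to -wmaxabs.
    kernel = kernels[kernel_indices[row]]
    # The kernel axes are [centre_v, centre_u, offset_v, offset_u], giving shape
    # (oversampling, oversampling, kernelwidth, kernelwidth) == (2, 2, 16, 16) here.
    assert kernel.shape == (2, 2, 16, 16)
    return kernel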