def test_pad_extract(self):
    for npixel, N2 in [(100, 128), (128, 256), (126, 128)]:
        # Make a 2D complex image of size (npixel, npixel) centred on (npixel//2, npixel//2)
        cs = 1 + self._pattern(npixel)
        # Pad it out to (N2, N2)
        cs_pad = pad_mid(cs, N2)
        # Now create the pattern we expect directly
        cs2 = 1 + self._pattern(N2) * N2 / npixel
        # At this point all fields in cs2 and cs_pad should either be equal or zero
        equal = numpy.abs(cs_pad - cs2) < 1e-15
        zero = numpy.abs(cs_pad) < 1e-15
        assert (equal + zero).all(), "Pad (%d, %d) failed" % (npixel, N2)
        # And extracting the middle npixel pixels should recover the original data
        assert_allclose(extract_mid(cs_pad, npixel), cs)
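# The test above relies on pad_mid() centring the input inside the padded array and on
# extract_mid() being its exact inverse on the central region. Below is a minimal
# numpy-only sketch of that centring convention; pad_mid_ref and extract_mid_ref are
# illustrative names and an assumed convention, not the library's implementation.
import numpy

def pad_mid_ref(a, npixel_out):
    """Zero-pad a square 2D array so that pixel (n // 2, n // 2) stays the centre."""
    npixel_in = a.shape[0]
    assert npixel_out >= npixel_in
    pad = npixel_out // 2 - npixel_in // 2
    out = numpy.zeros((npixel_out, npixel_out), dtype=a.dtype)
    out[pad:pad + npixel_in, pad:pad + npixel_in] = a
    return out

def extract_mid_ref(a, npixel_out):
    """Extract the central (npixel_out, npixel_out) region; inverse of pad_mid_ref."""
    npixel_in = a.shape[0]
    start = npixel_in // 2 - npixel_out // 2
    return a[start:start + npixel_out, start:start + npixel_out]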
def predict_2d_base(vis: Union[BlockVisibility, Visibility], model: Image, **kwargs) \
        -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering, i.e. all transforms are eventually expressed
    in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if isinstance(vis, BlockVisibility):
        log.debug("imaging.predict: coalescing prior to prediction")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    assert isinstance(avis, Visibility), avis

    _, _, ny, nx = model.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}

    spectral_mode, vfrequencymap = get_frequency_map(avis, model)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) * gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap, vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if isinstance(vis, BlockVisibility) and isinstance(svis, Visibility):
        log.debug("imaging.predict: decoalescing post prediction")
        return decoalesce_visibility(svis)
    else:
        return svis
def predict_2d_base(vis: Visibility, model: Image, **kwargs) -> Visibility:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering, i.e. all transforms are eventually expressed
    in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    _, _, ny, nx = model.data.shape

    spectral_mode, vfrequencymap = get_frequency_map(avis, model)                     # can be run in parallel
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model, **kwargs) # can be run in parallel
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **kwargs)            # can be run in parallel
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) * gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap, vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if not isinstance(vis, Visibility):
        return decoalesce_visibility(svis)
    else:
        return svis
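# Minimal usage sketch for predict_2d_base(), assuming `vt` is an existing Visibility
# (or BlockVisibility) and `model` is an Image covering the same field of view. The
# 'padding' and 'kernel' keyword names are assumptions based on the get_uvw_map() and
# get_kernel_list() calls above, not a confirmed parameter list.
model.data[...] = 0.0
ny, nx = model.data.shape[2], model.data.shape[3]
model.data[:, :, ny // 2, nx // 2] = 1.0   # unit point source at the image centre
vpred = predict_2d_base(vt, model, padding=2, kernel='2d')
# For a centred unit source the degridded visibilities should all have amplitude ~1
print(numpy.max(numpy.abs(vpred.data['vis'])))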
def predict_2d_base_timing(vis: Visibility, model: Image, **kwargs) -> (Visibility, tuple):
    """ Predict using convolutional degridding and return per-stage timings.

    This is at the bottom of the layering, i.e. all transforms are eventually expressed
    in terms of this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works) and a (degridding time, FFT time) tuple in seconds
    """
    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    _, _, ny, nx = model.data.shape

    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}

    spectral_mode, vfrequencymap = get_frequency_map(avis, model, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    inarr = (pad_mid(model.data, int(round(padding * nx))) * gcf).astype(dtype=complex)

    # Use original algorithm
    if not opt:
        time_fft = -time.time()
        uvgrid = fft(inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        vt = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                  vuvwmap, vfrequencymap, vpolarisationmap)
        time_degrid += time.time()

    # Use optimized algorithm
    else:
        time_fft = -time.time()
        uvgrid = numpy.zeros(inarr.shape, dtype=inarr.dtype)
        fft_c(uvgrid, inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3), dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]
        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)

        vt = numpy.zeros(avis.data['vis'].shape, dtype=numpy.complex128)
        convolutional_degrid_c(vt, native_order(kernels_c), native_order(kernel_indices),
                               native_order(uvgrid), native_order(vuvwmap),
                               native_order(vfrequencymap_c))
        time_degrid += time.time()

    avis.data['vis'] = vt

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if not isinstance(vis, Visibility):
        svis = decoalesce_visibility(svis)

    return svis, (time_degrid, time_fft)
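# Minimal usage sketch for the timed variant, assuming `vt` and `model` are prepared as
# in the sketch above. 'opt' selects the C-backed fft_c/convolutional_degrid_c path, as
# shown in the body of predict_2d_base_timing(); the tolerance below is an assumption.
svis_ref, (t_degrid_ref, t_fft_ref) = predict_2d_base_timing(vt, model, opt=False)
vis_ref = svis_ref.data['vis'].copy()   # copy before the in-place optimized run
svis_opt, (t_degrid_opt, t_fft_opt) = predict_2d_base_timing(vt, model, opt=True)
print("degrid: %.3fs -> %.3fs, fft: %.3fs -> %.3fs" %
      (t_degrid_ref, t_degrid_opt, t_fft_ref, t_fft_opt))
# The two paths should agree to numerical precision
numpy.testing.assert_allclose(vis_ref, svis_opt.data['vis'], atol=1e-7)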