Example #1
File: test_fft.py  Project: acl-star/arlo
def test_fft(data_dir, shape):
    a = create_random_data(shape, -100, 100, 'complex')

    start = time.time()
    ia = fft(a)
    stop = time.time()
    print('Original Time:  {:.2f}s'.format(stop - start))

    store_data(os.path.join(data_dir, 'a.dat'), a)
    store_data(os.path.join(data_dir, 'ia.dat'), ia)
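The helpers create_random_data, store_data, and fft above come from the project and are not shown in this excerpt. A minimal sketch of what the two data helpers might look like, assuming numpy arrays and raw binary output (hypothetical reimplementations; the project's real versions may differ):

import numpy

def create_random_data(shape, low, high, kind):
    # Hypothetical helper: uniform random values; 'complex' draws the real
    # and imaginary parts independently.
    real = numpy.random.uniform(low, high, shape)
    if kind == 'complex':
        imag = numpy.random.uniform(low, high, shape)
        return real + 1j * imag
    return real

def store_data(path, data):
    # Hypothetical helper: write the raw array bytes to disk.
    data.tofile(path)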
Example #2
def predict_2d_base(vis: Union[BlockVisibility, Visibility], model: Image,
                    **kwargs) -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if isinstance(vis, BlockVisibility):
        log.debug("imaging.predict: coalescing prior to prediction")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    assert isinstance(avis, Visibility), avis

    _, _, ny, nx = model.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) *
                  gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist,
                                            avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap,
                                            vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if isinstance(vis, BlockVisibility) and isinstance(svis, Visibility):
        log.debug("imaging.predict decoalescing post prediction")
        return decoalesce_visibility(svis)
    else:
        return svis
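The core of the prediction is the line that pads the model image to padding * nx pixels, multiplies it by the grid correction function gcf, and FFTs the result to form the uv-grid that convolutional_degrid then samples. A rough sketch of the centre-preserving padding step, assuming pad_mid zero-pads the last two axes symmetrically about the image centre (an assumption about the convention, not the library's actual implementation):

import numpy

def pad_mid_sketch(arr, npixel):
    # Zero-pad the last two axes of arr to npixel x npixel while keeping the
    # original data centred.
    ny, nx = arr.shape[-2:]
    if npixel == nx and npixel == ny:
        return arr
    assert npixel >= nx and npixel >= ny, "can only pad up"
    padded = numpy.zeros(arr.shape[:-2] + (npixel, npixel), dtype=arr.dtype)
    y0 = npixel // 2 - ny // 2
    x0 = npixel // 2 - nx // 2
    padded[..., y0:y0 + ny, x0:x0 + nx] = arr
    return padded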
Example #3
def fft_image(im, template_image=None):
    """ FFT an image, transform WCS as well
    
    Prefer to use axes 'UU---SIN' and 'VV---SIN' but astropy will not accept.
    
    :param im:
    :param template:
    :return:
    """
    assert len(im.shape) == 4
    d2r = numpy.pi / 180.0
    ft_wcs = copy.deepcopy(im.wcs)
    ft_shape = im.shape
    if im.wcs.wcs.ctype[0] == 'RA---SIN' and im.wcs.wcs.ctype[1] == 'DEC--SIN':
        ft_wcs.wcs.axis_types[0] = 0
        ft_wcs.wcs.axis_types[1] = 0
        ft_wcs.wcs.crval[0] = 0.0
        ft_wcs.wcs.crval[1] = 0.0
        ft_wcs.wcs.crpix[0] = ft_shape[3] // 2 + 1
        ft_wcs.wcs.crpix[1] = ft_shape[2] // 2 + 1
        ft_wcs.wcs.ctype[0] = 'UU'
        ft_wcs.wcs.ctype[1] = 'VV'
        ft_wcs.wcs.cdelt[0] = 1.0 / (ft_shape[3] * d2r * im.wcs.wcs.cdelt[0])
        ft_wcs.wcs.cdelt[1] = 1.0 / (ft_shape[2] * d2r * im.wcs.wcs.cdelt[1])
        ft_data = ifft(im.data.astype('complex'))
        return create_image_from_array(
            ft_data, wcs=ft_wcs, polarisation_frame=im.polarisation_frame)
    elif im.wcs.wcs.ctype[0] == 'UU' and im.wcs.wcs.ctype[1] == 'VV':
        ft_wcs.wcs.crval[0] = template_image.wcs.wcs.crval[0]
        ft_wcs.wcs.crval[1] = template_image.wcs.wcs.crval[1]
        ft_wcs.wcs.crpix[0] = template_image.wcs.wcs.crpix[0]
        ft_wcs.wcs.crpix[1] = template_image.wcs.wcs.crpix[1]
        ft_wcs.wcs.ctype[0] = template_image.wcs.wcs.ctype[0]
        ft_wcs.wcs.ctype[1] = template_image.wcs.wcs.ctype[1]
        ft_wcs.wcs.cdelt[0] = template_image.wcs.wcs.cdelt[0]
        ft_wcs.wcs.cdelt[1] = template_image.wcs.wcs.cdelt[1]
        ft_data = fft(im.data.astype('complex'))
        return create_image_from_array(
            ft_data, wcs=ft_wcs, polarisation_frame=im.polarisation_frame)
    else:
        raise NotImplementedError("Cannot FFT specified axes")
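fft_image places the reference pixel at shape // 2 + 1 (FITS 1-based crpix), which matches the convention of a centre-origin transform. A sketch of such centred transforms built from numpy.fft, offered as a guess at the convention behind the fft and ifft calls used above (scaling and axis handling in the library may differ):

import numpy

def centred_fft2(a):
    # Forward FFT over the last two axes with the origin at the array centre.
    return numpy.fft.fftshift(
        numpy.fft.fft2(numpy.fft.ifftshift(a, axes=(-2, -1)), axes=(-2, -1)),
        axes=(-2, -1))

def centred_ifft2(a):
    # Inverse of centred_fft2, again over the last two axes.
    return numpy.fft.fftshift(
        numpy.fft.ifft2(numpy.fft.ifftshift(a, axes=(-2, -1)), axes=(-2, -1)),
        axes=(-2, -1))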
Example #4
File: base.py  Project: Luffky/arl_backup
def predict_2d_base(vis: Visibility, model: Image, **kwargs) -> Visibility:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if type(vis) is not Visibility:
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis
    _, _, ny, nx = model.data.shape
    # print(model.shape)
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)  # can be parallelized
    polarisation_mode, vpolarisationmap = get_polarisation_map(
        avis, model, **kwargs)  # can be parallelized
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model,
                                                    **kwargs)  # can be parallelized
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)
    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) *
                  gcf).astype(dtype=complex))
    avis.data['vis'] = convolutional_degrid(vkernellist,
                                            avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap,
                                            vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if type(vis) is not Visibility:
        return decoalesce_visibility(svis)
    else:
        return svis
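The comments in this variant mark get_frequency_map, get_polarisation_map, and get_uvw_map as candidates for parallel execution, since none of them depends on the others' results. Inside predict_2d_base the three calls could, for example, be dispatched to a thread pool as sketched below, assuming the three functions are thread-safe (not verified here):

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=3) as pool:
    freq_future = pool.submit(get_frequency_map, avis, model)
    pol_future = pool.submit(get_polarisation_map, avis, model, **kwargs)
    uvw_future = pool.submit(get_uvw_map, avis, model, **kwargs)
    spectral_mode, vfrequencymap = freq_future.result()
    polarisation_mode, vpolarisationmap = pol_future.result()
    uvw_mode, shape, padding, vuvwmap = uvw_future.result()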
Example #5
def predict_2d_base_timing(vis: Visibility, model: Image,
                           **kwargs) -> (Visibility, tuple):
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    _, _, ny, nx = model.data.shape

    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)
    inarr = (pad_mid(model.data, int(round(padding * nx))) *
             gcf).astype(dtype=complex)

    # Use original algorithm
    if not opt:
        time_fft = -time.time()
        uvgrid = fft(inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        vt = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                  vuvwmap, vfrequencymap, vpolarisationmap)
        time_degrid += time.time()

    # Use optimized algorithm
    else:
        time_fft = -time.time()
        uvgrid = numpy.zeros(inarr.shape, dtype=inarr.dtype)
        fft_c(uvgrid, inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3),
                                dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]

        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)
        vt = numpy.zeros(avis.data['vis'].shape, dtype=numpy.complex128)
        convolutional_degrid_c(vt, native_order(kernels_c),
                               native_order(kernel_indices),
                               native_order(uvgrid), native_order(vuvwmap),
                               native_order(vfrequencymap_c))
        time_degrid += time.time()

    avis.data['vis'] = vt

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if not isinstance(vis, Visibility):
        svis = decoalesce_visibility(svis)

    return svis, (time_degrid, time_fft)
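predict_2d_base_timing returns the predicted visibility together with a (time_degrid, time_fft) pair, so the original and optimized (opt=True) code paths can be compared directly. A hypothetical usage sketch, assuming a visibility vt and a model image already built with the project's own helpers (not shown here):

# Hypothetical call: vt and model must come from the project's visibility
# and image constructors; 'padding' and 'opt' are read from kwargs above.
svis, (time_degrid, time_fft) = predict_2d_base_timing(vt, model, opt=True, padding=2)
print('degrid: {:.3f}s  fft: {:.3f}s'.format(time_degrid, time_fft))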