Example #1
def sum_visibility(vis: Visibility, direction: SkyCoord) -> (numpy.ndarray, numpy.ndarray):
    """ Direct Fourier summation in a given direction

    :param vis: Visibility to be summed
    :param direction: Direction of summation
    :return: flux[nchan,npol], weight[nchan,npol]
    """
    # TODO: Convert to Visibility or remove?

    svis = copy_visibility(vis)

    l, m, n = skycoord_to_lmn(direction, svis.phasecentre)
    phasor = numpy.conjugate(simulate_point(svis.uvw, l, m))

    # Need to put correct mapping here
    _, frequency = get_frequency_map(svis, None)

    frequency = list(frequency)

    nchan = max(frequency) + 1
    npol = svis.polarisation_frame.npol

    flux = numpy.zeros([nchan, npol])
    weight = numpy.zeros([nchan, npol])

    coords = svis.vis, svis.weight, phasor, frequency
    for v, wt, p, ic in zip(*coords):
        for pol in range(npol):
            flux[ic, pol] += numpy.real(wt[pol] * v[pol] * p)
            weight[ic, pol] += wt[pol]

    flux[weight > 0.0] = flux[weight > 0.0] / weight[weight > 0.0]
    flux[weight <= 0.0] = 0.0
    return flux, weight
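A minimal usage sketch (not from the original source) of sum_visibility: a Visibility `vis` is assumed to exist already, and the target direction is an astropy SkyCoord, matching the signature above.

from astropy.coordinates import SkyCoord
import astropy.units as u

# `vis` is assumed to have been created elsewhere; only the call pattern is shown.
direction = SkyCoord(ra=15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs')
flux, weight = sum_visibility(vis, direction)
# flux and weight both have shape [nchan, npol]; channels with zero weight carry zero flux.
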
def predict_skycomponent_visibility_old(vis: Visibility, sc: Union[Skycomponent, List[Skycomponent]]) -> Visibility:
    """Predict the visibility from a Skycomponent, add to existing visibility, for Visibility

    :param vis: Visibility
    :param sc: Skycomponent or list of SkyComponents
    :return: Visibility
    """
    assert isinstance(vis, Visibility), "vis is not a Visibility: %r" % vis
    
    if not isinstance(sc, collections.abc.Iterable):
        sc = [sc]
    
    _, im_nchan = list(get_frequency_map(vis, None))
    npol = vis.polarisation_frame.npol
    
    for comp in sc:
        
        assert_same_chan_pol(vis, comp)
        
        l, m, n = skycoord_to_lmn(comp.direction, vis.phasecentre)
        phasor = simulate_point(vis.uvw, l, m)
        for ivis in range(vis.nvis):
            for pol in range(npol):
                vis.data['vis'][ivis, pol] += comp.flux[im_nchan[ivis], pol] * phasor[ivis]
            
            # coords = phasor, ichan
            # for pol in range(npol):
            #     vis.data['vis'][:,pol] += [comp.flux[ic, pol] * p for p, ic in zip(*coords)]
    
    return vis
Example #3
def test_get_frequency_map_different_channel(self):
    self.model = create_image_from_visibility(self.vis, npixel=512, cellsize=0.001,
                                              frequency=self.startfrequency, nchan=3,
                                              channel_bandwidth=2e7)
    spectral_mode, vfrequency_map = get_frequency_map(self.vis, self.model)
    assert numpy.max(vfrequency_map) == self.model.nchan - 1
    assert spectral_mode == 'channel'
def weight_visibility(vis: Visibility, im: Image, **kwargs) -> Visibility:
    """ Reweight the visibility data using a selected algorithm

    Imaging uses the "imaging_weight" column; this function fills that column using one of
    several algorithms.

    Options are:
        - Natural: by visibility weight (optimum for noise in the final image)
        - Uniform: weight of each sample divided by the sum of weights in its cell (optimum for sidelobes)
        - Super-uniform: as uniform, but the sum of weights is taken over an extended box region
        - Briggs: a compromise between natural and uniform
        - Super-briggs: as Briggs, but the sum of weights is taken over an extended box region

    :param vis: Visibility to be reweighted
    :param im: Image template defining the weighting grid
    :return: Visibility with the imaging_weight column filled, plus the density and density grid
    """
    assert isinstance(vis, Visibility), "vis is not a Visibility: %r" % vis

    assert get_parameter(kwargs, "padding", False) is False
    spectral_mode, vfrequencymap = get_frequency_map(vis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(vis, im)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(vis, im)

    density = None
    densitygrid = None

    weighting = get_parameter(kwargs, "weighting", "uniform")
    vis.data['imaging_weight'], density, densitygrid = weight_gridding(
        im.data.shape, vis.data['weight'], vuvwmap, vfrequencymap,
        vpolarisationmap, weighting)

    return vis, density, densitygrid
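A hedged usage sketch of weight_visibility, assuming `vis` and `model` already exist. The `weighting` keyword matches the default read via get_parameter above; the other scheme names follow the option list in the docstring.

# Fill the imaging_weight column with uniform weights; the returned density and
# density grid can be inspected for diagnostics.
vis, density, densitygrid = weight_visibility(vis, model, weighting='uniform')
# weighting='natural' would instead leave the weights optimal for noise, per the docstring above.
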
Example #5
def test_get_frequency_map_channel(self):
    self.model = create_image_from_visibility(self.vis, npixel=512, cellsize=0.001,
                                              nchan=self.vnchan,
                                              frequency=self.startfrequency)
    spectral_mode, vfrequency_map = get_frequency_map(self.vis, self.model)
    assert numpy.max(vfrequency_map) == self.model.nchan - 1
    assert numpy.min(vfrequency_map) == 0
    assert spectral_mode == 'channel'
Example #6
def test_get_frequency_map_s3(self):
    self.model = create_low_test_image_from_s3(
        npixel=64,
        cellsize=0.001,
        frequency=self.frequency,
        channel_bandwidth=self.channel_bandwidth)
    spectral_mode, vfrequency_map = get_frequency_map(self.vis, self.model)
    assert numpy.max(vfrequency_map) == self.model.nchan - 1
    assert spectral_mode == 'channel'
Example #7
def predict_skycomponent_visibility(
        vis: Union[Visibility, BlockVisibility],
        sc: Union[Skycomponent, List[Skycomponent]]) -> Visibility:
    """Predict the visibility from a Skycomponent, add to existing visibility, for Visibility or BlockVisibility

    :param vis: Visibility or BlockVisibility
    :param sc: Skycomponent or list of SkyComponents
    :return: Visibility or BlockVisibility
    """
    if not isinstance(sc, collections.abc.Iterable):
        sc = [sc]

    if isinstance(vis, Visibility):

        _, im_nchan = list(get_frequency_map(vis, None))
        npol = vis.polarisation_frame.npol

        for comp in sc:

            assert_same_chan_pol(vis, comp)

            l, m, n = skycoord_to_lmn(comp.direction, vis.phasecentre)
            phasor = simulate_point(vis.uvw, l, m)
            for ivis in range(vis.nvis):
                for pol in range(npol):
                    vis.data['vis'][ivis, pol] += comp.flux[im_nchan[ivis],
                                                            pol] * phasor[ivis]

    elif isinstance(vis, BlockVisibility):

        nchan = vis.nchan
        npol = vis.npol

        k = numpy.array(vis.frequency) / constants.c.to('m/s').value

        for comp in sc:
            assert_same_chan_pol(vis, comp)

            flux = comp.flux
            if comp.polarisation_frame != vis.polarisation_frame:
                flux = convert_pol_frame(flux, comp.polarisation_frame,
                                         vis.polarisation_frame)

            l, m, n = skycoord_to_lmn(comp.direction, vis.phasecentre)
            for chan in range(nchan):
                phasor = simulate_point(vis.uvw * k[chan], l, m)
                for pol in range(npol):
                    vis.data['vis'][..., chan,
                                    pol] += flux[chan, pol] * phasor[...]

    return vis
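A hedged round-trip sketch: predict the visibility of an existing Skycomponent with the function above, then recover its flux with sum_visibility from Example #1. `vis` (a Visibility) and `comp` are assumed to exist and to share channel/polarisation layout, as assert_same_chan_pol requires.

# Zero the visibilities, add the component by direct Fourier prediction, then
# sum towards the component direction; the recovered flux should match comp.flux.
vis.data['vis'][...] = 0.0
vis = predict_skycomponent_visibility(vis, comp)
flux, weight = sum_visibility(vis, comp.direction)
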
def predict_2d_base(vis: Union[BlockVisibility, Visibility], model: Image,
                    **kwargs) -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (the operation works in place)
    """
    if isinstance(vis, BlockVisibility):
        log.debug("imaging.predict: coalescing prior to prediction")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    assert isinstance(avis, Visibility), avis

    _, _, ny, nx = model.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) *
                  gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist,
                                            avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap,
                                            vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if isinstance(vis, BlockVisibility) and isinstance(svis, Visibility):
        log.debug("imaging.predict decoalescing post prediction")
        return decoalesce_visibility(svis)
    else:
        return svis
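A hedged usage sketch of predict_2d_base, assuming `bvis` (a BlockVisibility or Visibility) and `model` (an Image) already exist; the `padding` keyword is forwarded to get_uvw_map as shown above.

# For a BlockVisibility the function coalesces before degridding and decoalesces
# afterwards, so the returned object has the same type as the input.
predicted = predict_2d_base(bvis, model, padding=2)
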
Example #9
def predict_2d_base(vis: Visibility, model: Image, **kwargs) -> Visibility:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (the operation works in place)
    """
    if type(vis) is not Visibility:
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis
    _, _, ny, nx = model.data.shape
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)  # can be computed in parallel
    polarisation_mode, vpolarisationmap = get_polarisation_map(
        avis, model, **kwargs)  # can be computed in parallel
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model,
                                                    **kwargs)  # can be computed in parallel
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)
    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) *
                  gcf).astype(dtype=complex))
    avis.data['vis'] = convolutional_degrid(vkernellist,
                                            avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap,
                                            vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if type(vis) is not Visibility:
        return decoalesce_visibility(svis)
    else:
        return svis
Example #10
def invert_2d_base_timing(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray, tuple):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image, sum of weights, and (gridding time, ifft time)

    """
    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(svis, im, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(svis, im)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol,
         int(round(padding * ny)),
         int(round(padding * nx))],
        dtype='complex')

    # Use original algorithm
    if not opt:
        time_grid = -time.time()
        imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                              svis.data['vis'],
                                              svis.data['imaging_weight'],
                                              vuvwmap, vfrequencymap,
                                              vpolarisationmap)
        time_grid += time.time()
    # Use optimized algorithm
    else:
        time_grid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3),
                                dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]

        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)
        sumwt = numpy.zeros((imgridpad.shape[0], imgridpad.shape[1]),
                            dtype=numpy.float64)

        convolutional_grid_c(imgridpad, sumwt, native_order(svis.data['vis']),
                             native_order(svis.data['imaging_weight']),
                             native_order(kernels_c),
                             native_order(kernel_indices),
                             native_order(vuvwmap),
                             native_order(vfrequencymap_c))
        time_grid += time.time()

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        log.debug("invert_2d_base: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs)
        resultimag = create_image_from_array(result.imag, im.wcs)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        # Use original algorithm
        if not opt:
            time_ifft = -time.time()
            inarr = ifft(imgridpad)
            time_ifft += time.time()

        # Use optimized algorithm
        else:
            time_ifft = -time.time()
            inarr = numpy.zeros(imgridpad.shape, dtype=imgridpad.dtype)
            ifft_c(inarr, imgridpad)
            time_ifft += time.time()

        result = extract_mid(numpy.real(inarr) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt, (time_grid, time_ifft)
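A hedged usage sketch of invert_2d_base_timing, assuming `vis` and `im` already exist: make the dirty image and the PSF from the same data and report where the time went. `opt=True` would select the C-backed gridding and FFT paths shown above.

dirty, sumwt, (time_grid, time_ifft) = invert_2d_base_timing(vis, im, dopsf=False, opt=False)
psf, _, _ = invert_2d_base_timing(vis, im, dopsf=True, opt=False)
print("gridding: %.3f s, ifft: %.3f s" % (time_grid, time_ifft))
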
Example #11
def predict_2d_base_timing(vis: Visibility, model: Image,
                           **kwargs) -> (Visibility, tuple):
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (the operation works in place)
    """
    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    _, _, ny, nx = model.data.shape

    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)
    inarr = (pad_mid(model.data, int(round(padding * nx))) *
             gcf).astype(dtype=complex)

    # Use original algorithm
    if not opt:
        time_fft = -time.time()
        uvgrid = fft(inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        vt = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                  vuvwmap, vfrequencymap, vpolarisationmap)
        time_degrid += time.time()

    # Use optimized algorithm
    else:
        time_fft = -time.time()
        uvgrid = numpy.zeros(inarr.shape, dtype=inarr.dtype)
        fft_c(uvgrid, inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3),
                                dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]

        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)
        vt = numpy.zeros(avis.data['vis'].shape, dtype=numpy.complex128)
        convolutional_degrid_c(vt, native_order(kernels_c),
                               native_order(kernel_indices),
                               native_order(uvgrid), native_order(vuvwmap),
                               native_order(vfrequencymap_c))
        time_degrid += time.time()

    avis.data['vis'] = vt

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if not isinstance(vis, Visibility):
        svis = decoalesce_visibility(svis)

    return svis, (time_degrid, time_fft)
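A hedged usage sketch of predict_2d_base_timing, assuming `vis` and `model` already exist: run the original and the optimized degridding paths and compare their timings.

svis, (t_degrid, t_fft) = predict_2d_base_timing(vis, model, opt=False)
svis_opt, (t_degrid_opt, t_fft_opt) = predict_2d_base_timing(vis, model, opt=True)
print("degrid: %.3f s vs %.3f s, fft: %.3f s vs %.3f s" %
      (t_degrid, t_degrid_opt, t_fft, t_fft_opt))
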
Example #12
def invert_2d_base(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image and sum of weights

    """
    if type(vis) is not Visibility:
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    spectral_mode, vfrequencymap = get_frequency_map(svis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(
        svis, im, **kwargs)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **kwargs)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol,
         int(round(padding * ny)),
         int(round(padding * nx))],
        dtype='complex')
    imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                          svis.data['vis'],
                                          svis.data['imaging_weight'], vuvwmap,
                                          vfrequencymap, vpolarisationmap)

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        log.debug("invert_2d_base: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs)
        resultimag = create_image_from_array(result.imag, im.wcs)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        result = extract_mid(numpy.real(ifft(imgridpad)) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt
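A hedged usage sketch of invert_2d_base, assuming `vis` and `im` already exist: the usual pattern of making the dirty image and the PSF in two calls, as the docstring suggests.

dirty, sumwt = invert_2d_base(vis, im, dopsf=False, normalize=True)
psf, _ = invert_2d_base(vis, im, dopsf=True, normalize=True)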