Example #1
def visibility_scatter_w(vis: Visibility, **kwargs) -> List[Visibility]:
    if isinstance(vis, BlockVisibility):
        avis = coalesce_visibility(vis, **kwargs)
        visibility_list = visibility_scatter(avis, vis_iter=vis_wstack_iter, **kwargs)
    else:
        visibility_list = visibility_scatter(vis, vis_iter=vis_wstack_iter, **kwargs)
        
    return visibility_list
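
A minimal usage sketch pairing the w-scatter above with the matching gather from Example #6 below. The value of vis_slices and the process_slice helper are illustrative placeholders, and blockvis stands for a BlockVisibility such as the one built in Example #23.

# Hypothetical usage: scatter a BlockVisibility into w slices, process each
# slice, then gather the results back (see visibility_gather_w in Example #6).
vis_slices = 11                                  # assumed number of w planes
slices = visibility_scatter_w(blockvis, vis_slices=vis_slices)
processed = [process_slice(s) for s in slices]   # process_slice is a placeholder
gathered = visibility_gather_w(processed, blockvis, vis_slices=vis_slices)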
 def test_coalesce_decoalesce_time(self):
     cvis = coalesce_visibility(self.blockvis, time_coal=1.0, frequency_coal=0.0, max_frequency_coal=1)
     assert numpy.min(cvis.frequency) == numpy.min(self.frequency)
     assert numpy.min(cvis.frequency) > 0.0
     dvis = decoalesce_visibility(cvis)
     assert dvis.nvis == self.blockvis.nvis
     dvis = decoalesce_visibility(cvis, overwrite=True)
     assert dvis.nvis == self.blockvis.nvis
 def scatter_vis(vis):
     # Scatter along the visibility iteration axis
     if isinstance(vis, BlockVisibility):
         avis = coalesce_visibility(vis, **kwargs)
     else:
         avis = vis
     result = [
         create_visibility_from_rows(avis, rows)
         for rows in vis_iter(avis, vis_slices=vis_slices, **kwargs)
     ]
     return result
 def scatter_vis(vis):
     if isinstance(vis, BlockVisibility):
         avis = coalesce_visibility(vis, **kwargs)
     else:
         avis = vis
     result = [
         create_visibility_from_rows(avis, rows)
         for rows in vis_iter(avis, vis_slices=vis_slices, **kwargs)
     ]
     assert len(result) == vis_slices, "result %s, vis_slices %d" % (
         str(result), vis_slices)
     return result
 def test_coalesce_decoalesce_singletime(self):
     self.times = numpy.array([0.0])
     self.blockvis = create_blockvisibility(self.lowcore, self.times, self.frequency, phasecentre=self.phasecentre,
                                            weight=1.0, polarisation_frame=PolarisationFrame('stokesI'),
                                            channel_bandwidth=self.channel_bandwidth)
     # Fill in the vis values so each can be uniquely identified
     self.blockvis.data['vis'] = range(self.blockvis.nvis)
     cvis = coalesce_visibility(self.blockvis, time_coal=1.0)
     assert numpy.min(cvis.frequency) == numpy.min(self.frequency)
     assert numpy.min(cvis.frequency) > 0.0
     dvis = decoalesce_visibility(cvis)
     assert dvis.nvis == self.blockvis.nvis
Example #6
def visibility_gather_w(visibility_list, vis, **kwargs):
    if isinstance(vis, BlockVisibility):
        cvis = coalesce_visibility(vis, **kwargs)
        return decoalesce_visibility(
            visibility_gather(visibility_list,
                              cvis,
                              vis_iter=vis_wstack_iter,
                              **kwargs))
    else:
        return visibility_gather(visibility_list,
                                 vis,
                                 vis_iter=vis_wstack_iter,
                                 **kwargs)
Example #7
def invert_timeslice_single(vis: Visibility,
                            im: Image,
                            dopsf,
                            normalize=True,
                            **kwargs) -> (Image, numpy.ndarray):
    """Process single time slice
    
    Extracted for re-use in parallel version
    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    """
    inchan, inpol, ny, nx = im.shape

    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    log.debug("invert_timeslice: inverting using time slices")

    avis, p, q = fit_uvwplane(avis, remove=True)

    workimage, sumwt = invert_2d_base(avis,
                                      im,
                                      dopsf,
                                      normalize=normalize,
                                      **kwargs)

    finalimage = create_empty_image_like(im)

    # Use griddata to do the conversion. This could be improved. Only cubic is possible in griddata.
    # The interpolation is ok for invert since the image is smooth.

    # Calculate nominal and distorted coordinates. The image is in distorted coordinates so we
    # need to convert back to nominal
    lnominal, mnominal, ldistorted, mdistorted = lm_distortion(
        workimage, -p, -q)

    for chan in range(inchan):
        for pol in range(inpol):
            finalimage.data[chan, pol, ...] = \
                griddata((mdistorted.flatten(), ldistorted.flatten()),
                         values=workimage.data[chan, pol, ...].flatten(),
                         method='cubic',
                         xi=(mnominal.flatten(), lnominal.flatten()),
                         fill_value=0.0,
                         rescale=True).reshape(finalimage.data[chan, pol, ...].shape)

    return finalimage, sumwt
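
The distorted-to-nominal conversion above uses scipy.interpolate.griddata with cubic interpolation. Below is a self-contained toy sketch of that regridding step; the sheared "distorted" grid is invented for illustration, whereas the real code obtains both grids from lm_distortion.

import numpy
from scipy.interpolate import griddata

ny, nx = 64, 64
mnominal, lnominal = numpy.mgrid[-1.0:1.0:1j * ny, -1.0:1.0:1j * nx]
# Toy shear standing in for the fitted plane coefficients (p, q)
ldistorted = lnominal + 0.05 * mnominal
mdistorted = mnominal - 0.03 * lnominal

# A smooth image defined on the distorted grid
workimage = numpy.exp(-(ldistorted ** 2 + mdistorted ** 2) / 0.1)

# Resample back onto the nominal grid, as in invert_timeslice_single above
finalimage = griddata((mdistorted.flatten(), ldistorted.flatten()),
                      values=workimage.flatten(),
                      xi=(mnominal.flatten(), lnominal.flatten()),
                      method='cubic',
                      fill_value=0.0,
                      rescale=True).reshape(workimage.shape)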
Example #8
def predict_timeslice_single(vis: Visibility,
                             model: Image,
                             predict=predict_2d_base,
                             remove=True,
                             **kwargs) -> Visibility:
    """ Predict using a single time slices.
    
    This fits a single plane and corrects the image geometry.

    :param vis: Visibility to be predicted
    :param model: model image
    :param predict:
    :param remove: Remove fitted w (so that wprojection will do the right thing)
    :return: resulting visibility (in place works)
    """
    log.debug("predict_timeslice: predicting using time slices")

    inchan, inpol, ny, nx = model.shape

    vis.data['vis'] *= 0.0

    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    # Fit and remove best fitting plane for this slice
    avis, p, q = fit_uvwplane(avis, remove=remove)

    # Calculate nominal and distorted coordinate systems. We will convert the model
    # from nominal to distorted before predicting.
    workimage = copy_image(model)

    # Use griddata to do the conversion. This could be improved. Only cubic is possible in griddata.
    # The interpolation is ok for invert since the image is smooth but for clean images the
    # interpolation is particularly poor, leading to speckle in the residual image.
    lnominal, mnominal, ldistorted, mdistorted = lm_distortion(model, -p, -q)
    for chan in range(inchan):
        for pol in range(inpol):
            workimage.data[chan, pol, ...] = \
                griddata((mnominal.flatten(), lnominal.flatten()),
                         values=workimage.data[chan, pol, ...].flatten(),
                         xi=(mdistorted.flatten(), ldistorted.flatten()),
                         method='cubic',
                         fill_value=0.0,
                         rescale=True).reshape(workimage.data[chan, pol, ...].shape)

    avis = predict(avis, workimage, **kwargs)

    return avis
 def test_predict_sky_components_coalesce(self):
     sc = create_low_test_skycomponents_from_gleam(flux_limit=10.0,
                                                   polarisation_frame=PolarisationFrame("stokesI"),
                                                   frequency=self.frequency, kind='cubic',
                                                   phasecentre=SkyCoord("17h20m31s", "-00d58m45s"),
                                                   radius=0.1)
     self.config = create_named_configuration('LOWBD2-CORE')
     self.phasecentre = SkyCoord("17h20m31s", "-00d58m45s")
     sampling_time = 3.76
     self.times = numpy.arange(0.0, + 300 * sampling_time, sampling_time)
     self.vis = create_blockvisibility(self.config, self.times, self.frequency, phasecentre=self.phasecentre,
                                       weight=1.0, polarisation_frame=PolarisationFrame('stokesI'),
                                       channel_bandwidth=self.channel_bandwidth)
     self.vis = predict_skycomponent_blockvisibility(self.vis, sc)
     cvt = coalesce_visibility(self.vis, time_coal=1.0)
     assert cvt.cindex is not None
    def gather_vis(results, vis):
        # Gather across the visibility iteration axis
        assert vis is not None
        if isinstance(vis, BlockVisibility):
            avis = coalesce_visibility(vis, **kwargs)
        else:
            avis = vis
        for i, rows in enumerate(
                vis_iter(avis, vis_slices=vis_slices, **kwargs)):
            assert i < len(results), "Insufficient results for the gather"
            if rows is not None and results[i] is not None:
                avis.data['vis'][rows] = results[i].data['vis']

        if isinstance(vis, BlockVisibility):
            return decoalesce_visibility(avis, **kwargs)
        else:
            return avis
def predict_2d_base(vis: Union[BlockVisibility, Visibility], model: Image,
                    **kwargs) -> Union[BlockVisibility, Visibility]:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if isinstance(vis, BlockVisibility):
        log.debug("imaging.predict: coalescing prior to prediction")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    assert isinstance(avis, Visibility), avis

    _, _, ny, nx = model.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)

    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) *
                  gcf).astype(dtype=complex))

    avis.data['vis'] = convolutional_degrid(vkernellist,
                                            avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap,
                                            vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if isinstance(vis, BlockVisibility) and isinstance(svis, Visibility):
        log.debug("imaging.predict decoalescing post prediction")
        return decoalesce_visibility(svis)
    else:
        return svis
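
A usage sketch for predict_2d_base, assuming blockvis and the model image image_graph built as in Example #23 below. Passing a BlockVisibility triggers the coalesce/decoalesce round trip shown above; a pre-coalesced Visibility skips both conversions.

# With a BlockVisibility: coalesced on entry, decoalesced on return
predicted_block = predict_2d_base(blockvis, image_graph)

# With an already coalesced Visibility: no conversion either way
cvis = coalesce_visibility(blockvis)
predicted_vis = predict_2d_base(cvis, image_graph)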
def predict_wstack_single(vis, model, remove=True, **kwargs) -> Visibility:
    """ Predict using a single w slices.
    
    This processes a single w plane, rotating out the w beam for the average w

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """

    if not isinstance(vis, Visibility):
        log.debug("predict_wstack_single: Coalescing")
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    log.debug("predict_wstack_single: predicting using single w slice")

    avis.data['vis'] *= 0.0
    # We might want to do wprojection so we remove the average w
    w_average = numpy.average(avis.w)
    avis.data['uvw'][..., 2] -= w_average
    tempvis = copy_visibility(avis)

    # Calculate w beam and apply to the model. The imaginary part is not needed
    workimage = copy_image(model)
    w_beam = create_w_term_like(model, w_average, vis.phasecentre)

    # Do the real part
    workimage.data = w_beam.data.real * model.data
    avis = predict_2d_base(avis, workimage, **kwargs)

    # and now the imaginary part
    workimage.data = w_beam.data.imag * model.data
    tempvis = predict_2d_base(tempvis, workimage, **kwargs)
    avis.data['vis'] -= 1j * tempvis.data['vis']

    if not remove:
        avis.data['uvw'][..., 2] += w_average

    if isinstance(vis, BlockVisibility) and isinstance(avis, Visibility):
        log.debug("imaging.predict decoalescing post prediction")
        return decoalesce_visibility(avis)
    else:
        return avis
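
The two-pass prediction above degrids the real and imaginary parts of the w beam separately and recombines them as vis_real - 1j * vis_imag. Because degridding is linear in the image, this is the same as a single pass over the image multiplied by the complex conjugate of the w beam. A toy numpy check of that identity, with numpy.fft.fft2 standing in for the predict step:

import numpy

rng = numpy.random.default_rng(0)
image = rng.standard_normal((32, 32))                    # real model image
w_beam = numpy.exp(1j * rng.standard_normal((32, 32)))   # toy complex w beam

two_pass = numpy.fft.fft2(image * w_beam.real) - 1j * numpy.fft.fft2(image * w_beam.imag)
one_pass = numpy.fft.fft2(image * numpy.conj(w_beam))
assert numpy.allclose(two_pass, one_pass)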
Example #13
def predict_with_vis_iterator(vis: Visibility, model: Image, vis_iter=vis_slice_iter,
                              predict=predict_2d, **kwargs) -> Visibility:
    """Iterate through prediction in chunks
    
    This knows about the structure of predict in different execution frameworks but not
    anything about the actual processing.
    
    """
    log.debug("predict_with_vis_iterator: Processing chunks")
    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = vis
        
    # Do each chunk in turn
    for rows in vis_iter(svis, **kwargs):
        if numpy.sum(rows) and svis is not None:
            visslice = create_visibility_from_rows(svis, rows)
            visslice.data['vis'][...] = 0.0
            visslice = predict(visslice, model, **kwargs)
            svis.data['vis'][rows] += visslice.data['vis']
    return svis
Example #14
def invert_with_vis_iterator(vis: Visibility, im: Image, dopsf=False, normalize=True, vis_iter=vis_slice_iter,
                             invert=invert_2d, **kwargs):
    """ Invert using a specified iterator and invert

    This knows about the structure of invert in different execution frameworks but not
    anything about the actual processing.

    :param vis:
    :param im:
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :param kwargs:
    :return:
    """
    resultimage = create_empty_image_like(im)

    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = vis

    i = 0
    for rows in vis_iter(svis, **kwargs):
        if numpy.sum(rows) and svis is not None:
            visslice = create_visibility_from_rows(svis, rows)
            workimage, sumwt = invert(visslice, im, dopsf, normalize=False, **kwargs)
            resultimage.data += workimage.data
            if i == 0:
                totalwt = sumwt
            else:
                totalwt += sumwt
            i += 1

    if normalize:
        resultimage = normalize_sumwt(resultimage, totalwt)

    return resultimage, totalwt
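
A usage sketch for the two iterator-driven wrappers above, assuming blockvis and image_graph as in Example #23 below; vis_slices=4 is an arbitrary illustration. The defaults vis_slice_iter, predict_2d and invert_2d are used here, but the vis_iter/predict/invert arguments accept alternatives such as vis_wstack_iter and predict_wstack_single from the examples above.

# Predict model visibilities chunk by chunk, then invert them to a dirty image
predicted = predict_with_vis_iterator(blockvis, image_graph, vis_slices=4)
dirty, sumwt = invert_with_vis_iterator(predicted, image_graph,
                                        dopsf=False, normalize=True,
                                        vis_slices=4)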
Example #15
def predict_2d_base(vis: Visibility, model: Image, **kwargs) -> Visibility:
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis
    _, _, ny, nx = model.data.shape
    # print(model.shape)
    spectral_mode, vfrequencymap = get_frequency_map(avis, model)  # could be parallelised
    polarisation_mode, vpolarisationmap = get_polarisation_map(
        avis, model, **kwargs)  # could be parallelised
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model,
                                                    **kwargs)  # could be parallelised
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)
    uvgrid = fft((pad_mid(model.data, int(round(padding * nx))) *
                  gcf).astype(dtype=complex))
    avis.data['vis'] = convolutional_degrid(vkernellist,
                                            avis.data['vis'].shape, uvgrid,
                                            vuvwmap, vfrequencymap,
                                            vpolarisationmap)

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if not isinstance(vis, Visibility):
        return decoalesce_visibility(svis)
    else:
        return svis
Example #16
def invert_2d_base_timing(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray, tuple):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image

    """
    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(svis, im, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(svis, im)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol,
         int(round(padding * ny)),
         int(round(padding * nx))],
        dtype='complex')

    # Use original algorithm
    if not opt:
        time_grid = -time.time()
        imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                              svis.data['vis'],
                                              svis.data['imaging_weight'],
                                              vuvwmap, vfrequencymap,
                                              vpolarisationmap)
        time_grid += time.time()
    # Use optimized algorithm
    else:
        time_grid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3),
                                dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]

        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)
        sumwt = numpy.zeros((imgridpad.shape[0], imgridpad.shape[1]),
                            dtype=numpy.float64)

        convolutional_grid_c(imgridpad, sumwt, native_order(svis.data['vis']),
                             native_order(svis.data['imaging_weight']),
                             native_order(kernels_c),
                             native_order(kernel_indices),
                             native_order(vuvwmap),
                             native_order(vfrequencymap_c))
        time_grid += time.time()

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        log.debug("invert_2d_base: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs)
        resultimag = create_image_from_array(result.imag, im.wcs)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        # Use original algorithm
        if not opt:
            time_ifft = -time.time()
            inarr = ifft(imgridpad)
            time_ifft += time.time()

        # Use optimized algorithm
        else:
            time_ifft = -time.time()
            inarr = numpy.zeros(imgridpad.shape, dtype=imgridpad.dtype)
            ifft_c(inarr, imgridpad)
            time_ifft += time.time()

        result = extract_mid(numpy.real(inarr) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt, (time_grid, time_ifft)
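
A usage sketch for the timing variant, assuming the coalesced Visibility vis and image_graph from Example #23 below. The third return value is a (grid, ifft) pair of elapsed wall-clock seconds; the "t = -time.time(); ...; t += time.time()" idiom above simply measures each step.

dirty, sumwt, (time_grid, time_ifft) = invert_2d_base_timing(vis, image_graph,
                                                             dopsf=False, opt=False)
print("gridding: %.3f s, ifft: %.3f s" % (time_grid, time_ifft))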
Example #17
def predict_2d_base_timing(vis: Visibility, model: Image,
                           **kwargs) -> (Visibility, tuple):
    """ Predict using convolutional degridding.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms of
    this function. Any shifting needed is performed here.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """
    if not isinstance(vis, Visibility):
        avis = coalesce_visibility(vis, **kwargs)
    else:
        avis = vis

    _, _, ny, nx = model.data.shape

    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(avis, model, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(avis, model)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(avis, model, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(avis, model, **kwargs)
    inarr = (pad_mid(model.data, int(round(padding * nx))) *
             gcf).astype(dtype=complex)

    # Use original algorithm
    if not opt:
        time_fft = -time.time()
        uvgrid = fft(inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        vt = convolutional_degrid(vkernellist, avis.data['vis'].shape, uvgrid,
                                  vuvwmap, vfrequencymap, vpolarisationmap)
        time_degrid += time.time()

    # Use optimized algorithm
    else:
        time_fft = -time.time()
        uvgrid = numpy.zeros(inarr.shape, dtype=inarr.dtype)
        fft_c(uvgrid, inarr)
        time_fft += time.time()

        time_degrid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3),
                                dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]

        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)
        vt = numpy.zeros(avis.data['vis'].shape, dtype=numpy.complex128)
        convolutional_degrid_c(vt, native_order(kernels_c),
                               native_order(kernel_indices),
                               native_order(uvgrid), native_order(vuvwmap),
                               native_order(vfrequencymap_c))
        time_degrid += time.time()

    avis.data['vis'] = vt

    # Now we can shift the visibility from the image frame to the original visibility frame
    svis = shift_vis_to_image(avis, model, tangent=True, inverse=True)

    if not isinstance(vis, Visibility):
        svis = decoalesce_visibility(svis)

    return svis, (time_degrid, time_fft)
Example #18
def invert_2d_base(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image

    """
    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    spectral_mode, vfrequencymap = get_frequency_map(svis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(
        svis, im, **kwargs)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **kwargs)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol,
         int(round(padding * ny)),
         int(round(padding * nx))],
        dtype='complex')
    imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                          svis.data['vis'],
                                          svis.data['imaging_weight'], vuvwmap,
                                          vfrequencymap, vpolarisationmap)

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        log.debug("invert_2d_base: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs)
        resultimag = create_image_from_array(result.imag, im.wcs)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        result = extract_mid(numpy.real(ifft(imgridpad)) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt
Example #19
def visibility_gather_w(visibility_list: List[Visibility], vis: Visibility, **kwargs) -> Visibility:
    if isinstance(vis, BlockVisibility):
        cvis = coalesce_visibility(vis, **kwargs)
        return decoalesce_visibility(visibility_gather(visibility_list, cvis, vis_iter=vis_wstack_iter, **kwargs))
    else:
        return visibility_gather(visibility_list, vis, vis_iter=vis_wstack_iter, **kwargs)
 def scatter_vis(vis):
     if isinstance(vis, BlockVisibility):
         avis = coalesce_visibility(vis, **kwargs)
     else:
         avis = vis
     return [create_visibility_from_rows(avis, rows) for rows in vis_iter(avis, vis_slices=vis_slices, **kwargs)]
Example #21
 def test_coalesce_decoalesce_tbgrid_vis_null(self):
     cvis = coalesce_visibility(self.blockvis, time_coal=0.0)
     assert numpy.min(cvis.frequency) == numpy.min(self.frequency)
     assert numpy.min(cvis.frequency) > 0.0
Example #22
def visibility_scatter_w(vis, **kwargs):
    if isinstance(vis, BlockVisibility):
        avis = coalesce_visibility(vis, **kwargs)
        return visibility_scatter(avis, vis_iter=vis_wstack_iter, **kwargs)
    else:
        return visibility_scatter(vis, vis_iter=vis_wstack_iter, **kwargs)
Example #23
image_graph = create_test_image(
    frequency=frequency,
    phasecentre=phasecentre,
    cellsize=0.001,
    polarisation_frame=PolarisationFrame('linear'))  # data [2,4,256,256]
blockvis = create_blockvisibility(
    lowcore,
    times=times,
    frequency=frequency,
    channel_bandwidth=channel_bandwidth,
    phasecentre=phasecentre,
    weight=1,
    polarisation_frame=PolarisationFrame('linear'),
    integration_time=1.0)
# Create a Visibility from the blockvis; compression rate is 0
vis = coalesce_visibility(blockvis)
# vis = create_visibility(lowcore, times=times, frequency=frequency,
#                         channel_bandwidth=channel_bandwidth,
#                         phasecentre=phasecentre, weight=1,
#                         polarisation_frame=PolarisationFrame('stokesIQUV'),
#                         integration_time=1.0)  # vis [baselines, times, nchan, npol]


class TestImageIterators(unittest.TestCase):
    #===predict_module===
    # def test_visibility_class(self):
    #     for mode in ['pol', 'npol', 'chan']:
    #         viss, vis_share = visibility_to_visibility_para(vis, mode)
    #         new_vis = visibility_para_to_visibility(viss, mode, vis_share)
    #         visibility_right(vis, new_vis)
    #         print("%s visibility test passed" % mode)
def invert_function(vis,
                    im: Image,
                    dopsf=False,
                    normalize=True,
                    context='2d',
                    inner=None,
                    **kwargs):
    """ Invert using algorithm specified by context:

     * 2d: Two-dimensional transform
     * wstack: wstacking with either vis_slices or wstack (spacing between w planes) set
     * wprojection: w projection with wstep (spacing between w planes) set, also kernel='wprojection'
     * timeslice: snapshot imaging with either vis_slices or timeslice set. timeslice='auto' does every time
     * facets: Faceted imaging with facets facets on each axis
     * facets_wprojection: facets AND wprojection
     * facets_wstack: facets AND wstacking
     * wprojection_wstack: wprojection and wstacking


    :param vis:
    :param im:
    :param dopsf: Make the psf instead of the dirty image (False)
    :param normalize: Normalize by the sum of weights (True)
    :param context: Imaging context e.g. '2d', 'timeslice', etc.
    :param inner: Inner loop 'vis'|'image'
    :param kwargs:
    :return: Image, sum of weights
    """
    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    image_iter = c['image_iterator']
    invert = c['invert']
    if inner is None:
        inner = c['inner']

    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = vis

    resultimage = create_empty_image_like(im)

    if inner == 'image':
        totalwt = None
        for rows in vis_iter(svis, **kwargs):
            if numpy.sum(rows):
                visslice = create_visibility_from_rows(svis, rows)
                sumwt = 0.0
                workimage = create_empty_image_like(im)
                for dpatch in image_iter(workimage, **kwargs):
                    result, sumwt = invert(visslice,
                                           dpatch,
                                           dopsf,
                                           normalize=False,
                                           **kwargs)
                    # Ensure that we fill in the elements of dpatch instead of creating a new numpy array
                    dpatch.data[...] = result.data[...]
                # Assume that sumwt is the same for all patches
                if totalwt is None:
                    totalwt = sumwt
                else:
                    totalwt += sumwt
                resultimage.data += workimage.data
    else:
        # We assume that the weight is the same for all image iterations
        totalwt = None
        workimage = create_empty_image_like(im)
        for dpatch in image_iter(workimage, **kwargs):
            totalwt = None
            for rows in vis_iter(svis, **kwargs):
                if numpy.sum(rows):
                    visslice = create_visibility_from_rows(svis, rows)
                    result, sumwt = invert(visslice,
                                           dpatch,
                                           dopsf,
                                           normalize=False,
                                           **kwargs)
                    # Ensure that we fill in the elements of dpatch instead of creating a new numpy array
                    dpatch.data[...] += result.data[...]
                    if totalwt is None:
                        totalwt = sumwt
                    else:
                        totalwt += sumwt
            resultimage.data += workimage.data
            workimage.data[...] = 0.0

    assert totalwt is not None, "No valid data found for imaging"
    if normalize:
        resultimage = normalize_sumwt(resultimage, totalwt)

    return resultimage, totalwt
def predict_function(vis,
                     model: Image,
                     context='2d',
                     inner=None,
                     **kwargs) -> Visibility:
    """Predict visibilities using algorithm specified by context
    
     * 2d: Two-dimensional transform
     * wstack: wstacking with either vis_slices or wstack (spacing between w planes) set
     * wprojection: w projection with wstep (spacing between w planes) set, also kernel='wprojection'
     * timeslice: snapshot imaging with either vis_slices or timeslice set. timeslice='auto' does every time
     * facets: Faceted imaging with facets facets on each axis
     * facets_wprojection: facets AND wprojection
     * facets_wstack: facets AND wstacking
     * wprojection_wstack: wprojection and wstacking

    
    :param vis:
    :param model: Model image, used to determine image characteristics
    :param context: Imaging context e.g. '2d', 'timeslice', etc.
    :param inner: Inner loop 'vis'|'image'
    :param kwargs:
    :return:


    """
    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    image_iter = c['image_iterator']
    predict = c['predict']
    if inner is None:
        inner = c['inner']

    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = vis

    result = copy_visibility(vis, zero=True)

    if inner == 'image':
        for rows in vis_iter(svis, **kwargs):
            if numpy.sum(rows):
                visslice = create_visibility_from_rows(svis, rows)
                visslice.data['vis'][...] = 0.0
                # Iterate over images
                for dpatch in image_iter(model, **kwargs):
                    result.data['vis'][...] = 0.0
                    result = predict(visslice, dpatch, **kwargs)
                    svis.data['vis'][rows] += result.data['vis']
    else:
        # Iterate over images
        for dpatch in image_iter(model, **kwargs):
            for rows in vis_iter(svis, **kwargs):
                if numpy.sum(rows):
                    visslice = create_visibility_from_rows(svis, rows)
                    result.data['vis'][...] = 0.0
                    result = predict(visslice, dpatch, **kwargs)
                    svis.data['vis'][rows] += result.data['vis']

    return svis
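
A usage sketch for predict_function and invert_function, assuming blockvis and image_graph as in Example #23 above. The context string selects one of the algorithms listed in the docstrings; 'wstack' and 'timeslice' additionally need vis_slices (or wstack / timeslice) to define the slicing, and the value used here is an arbitrary illustration.

predicted = predict_function(blockvis, image_graph, context='wstack', vis_slices=11)
dirty, sumwt = invert_function(predicted, image_graph, dopsf=False,
                               context='wstack', vis_slices=11)
psf, _ = invert_function(predicted, image_graph, dopsf=True,
                         context='wstack', vis_slices=11)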