Example #1
def restore_cube(model: Image, psf: Image, residual=None, **kwargs) -> Image:
    """ Restore the model image to the residuals

    :params psf: Input PSF
    :return: restored image

    """
    assert isinstance(model, Image), model
    assert image_is_canonical(model)
    assert isinstance(psf, Image), psf
    assert image_is_canonical(psf)

    assert residual is None or isinstance(residual, Image), residual
    assert residual is None or image_is_canonical(residual)

    restored = copy_image(model)

    npixel = psf.data.shape[3]
    sl = slice(npixel // 2 - 7, npixel // 2 + 8)

    size = get_parameter(kwargs, "psfwidth", None)

    if size is None:
        # isotropic at the moment!
        from scipy.optimize import minpack
        try:
            fit = fit_2dgaussian(psf.data[0, 0, sl, sl])
            if fit.x_stddev <= 0.0 or fit.y_stddev <= 0.0:
                log.debug(
                    'restore_cube: error in fitting to psf, using 1 pixel stddev'
                )
                size = 1.0
            else:
                size = max(fit.x_stddev, fit.y_stddev)
                log.debug('restore_cube: psfwidth = %s' % (size))
        except minpack.error as err:
            log.debug('restore_cube: minpack error, using 1 pixel stddev')
            size = 1.0
        except ValueError as err:
            log.debug(
                'restore_cube: warning in fit to psf, using 1 pixel stddev')
            size = 1.0
    else:
        log.debug('restore_cube: Using specified psfwidth = %s' % (size))

    # By convention, we normalise the peak not the integral so this is the volume of the Gaussian
    norm = 2.0 * numpy.pi * size**2
    gk = Gaussian2DKernel(size)
    for chan in range(model.shape[0]):
        for pol in range(model.shape[1]):
            restored.data[chan, pol, :, :] = norm * convolve_fft(
                model.data[chan, pol, :, :],
                gk,
                normalize_kernel=False,
                allow_huge=True)
    if residual is not None:
        restored.data += residual.data
    return restored
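
A small, self-contained check of the peak-normalisation convention noted above, using plain numpy/astropy arrays rather than RASCIL Image objects (the image size and beam width are made up for the example):

# Gaussian2DKernel integrates to one, so a unit point source convolved with it
# has peak 1/(2*pi*size^2); multiplying by norm = 2*pi*size^2 restores a
# unit-peak point source.
import numpy
from astropy.convolution import Gaussian2DKernel, convolve_fft

size = 2.0                                  # clean beam stddev in pixels
delta = numpy.zeros((128, 128))
delta[64, 64] = 1.0                         # unit point source
norm = 2.0 * numpy.pi * size ** 2
restored = norm * convolve_fft(delta, Gaussian2DKernel(size),
                               normalize_kernel=False, allow_huge=True)
print(restored.max())                       # ~1.0: the peak is preserved
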
def get_frequency_map(vis, im: Image = None):
    """ Map channels from visibilities to image

    """

    # Find the unique frequencies in the visibility
    ufrequency = numpy.unique(vis.frequency)
    vnchan = len(ufrequency)

    if im is None:
        spectral_mode = 'channel'
        if vis.frequency_map is None:
            vfrequencymap = get_rowmap(vis.frequency, ufrequency)
            vis.frequency_map = vfrequencymap
        else:
            vfrequencymap = vis.frequency_map

        assert min(
            vfrequencymap
        ) >= 0, "Invalid frequency map: visibility channel < 0: %s" % str(
            vfrequencymap)

    elif im.data.shape[0] == 1 and vnchan >= 1:
        assert image_is_canonical(im)

        spectral_mode = 'mfs'
        if vis.frequency_map is None:
            vfrequencymap = numpy.zeros_like(vis.frequency, dtype='int')
            vis.frequency_map = vfrequencymap
        else:
            vfrequencymap = vis.frequency_map

    else:
        assert image_is_canonical(im)

        # We can map these to image channels
        v2im_map = im.wcs.sub(['spectral']).wcs_world2pix(ufrequency,
                                                          0)[0].astype('int')

        spectral_mode = 'channel'
        nrows = len(vis.frequency)
        row2vis = numpy.array(get_rowmap(vis.frequency, ufrequency))
        vfrequencymap = [v2im_map[row2vis[row]] for row in range(nrows)]

        assert min(vfrequencymap
                   ) >= 0, "Invalid frequency map: image channel < 0 %s" % str(
                       vfrequencymap)
        assert max(vfrequencymap) < im.shape[0], "Invalid frequency map: image channel > number image channels %s" % \
                                                 str(vfrequencymap)

    return spectral_mode, vfrequencymap
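
The channel mapping above reduces to assigning each visibility row the index of its unique frequency, which can then be converted to an image channel through the spectral WCS. A stand-alone numpy sketch of the first step (the frequencies are invented for the example; get_rowmap is the RASCIL equivalent):

import numpy

vis_frequency = numpy.array([1.0e8, 1.1e8, 1.0e8, 1.2e8, 1.1e8])
ufrequency = numpy.unique(vis_frequency)
# Stand-in for get_rowmap: each row gets the index of its unique frequency
rowmap = numpy.searchsorted(ufrequency, vis_frequency)
print(rowmap)                               # [0 1 0 2 1]
assert rowmap.min() >= 0 and rowmap.max() < len(ufrequency)
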
Example #3
def image_gather_channels(image_list: List[Image],
                          im: Image = None,
                          subimages=0) -> Image:
    """Gather a list of subimages back into an image using the channel_iterator
    
    If the template image is not given then it will be formed assuming that the list has
    been generated by image_scatter_channels with subimages = number of channels

    :param image_list: List of subimages
    :param im: Output (template) image; constructed from the subimages if None
    :param subimages: Number of subimages (0 means use the length of image_list)
    :return: Gathered image
    """

    if im is None:
        nchan = len(image_list)
        _, npol, ny, nx = image_list[0].shape
        im_shape = nchan, npol, ny, nx
        im = create_image_from_array(
            numpy.zeros(im_shape, dtype=image_list[0].data.dtype),
            image_list[0].wcs, image_list[0].polarisation_frame)

    assert image_is_canonical(im)

    if subimages == 0:
        subimages = len(image_list)

    for i, slab in enumerate(image_channel_iter(im, subimages=subimages)):
        slab.data[...] = image_list[i].data[...]

    return im
def weight_blockvisibility(vis,
                           model,
                           gcfcf=None,
                           weighting="uniform",
                           robustness=0.0,
                           **kwargs):
    """ Weight the visibility data

    This is done collectively so the weights are summed over all vis_lists and then
    corrected

    :param vis_list:
    :param model_imagelist: Model required to determine weighting parameters
    :param weighting: Type of weighting
    :param kwargs: Parameters for functions in graphs
    :return: List of vis_graphs
   """

    assert isinstance(vis, BlockVisibility), vis
    assert image_is_canonical(model)

    if gcfcf is None:
        gcfcf = create_pswf_convolutionfunction(model)

    griddata = create_griddata_from_image(model, vis)
    griddata, sumwt = grid_blockvisibility_weight_to_griddata(
        vis, griddata, gcfcf[1])
    vis = griddata_blockvisibility_reweight(vis,
                                            griddata,
                                            gcfcf[1],
                                            weighting=weighting,
                                            robustness=robustness)
    return vis
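
A hypothetical call, assuming bvis is a BlockVisibility and model a matching canonical Image (neither is defined here); the robustness argument only matters for robust-style weighting:

bvis = weight_blockvisibility(bvis, model, weighting="uniform")
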
Example #5
def predict_wstack_single(vis,
                          model,
                          remove=True,
                          gcfcf=None,
                          **kwargs) -> Visibility:
    """ Predict using a single w slices.
    
    This processes a single w plane, rotating out the w beam for the average w

    The w-stacking or w-slicing approach is to partition the visibility data by slices in w. The measurement equation is
    approximated as:

    .. math::

        V(u,v,w) = \\sum_i \\int \\frac{I(l,m) e^{-2 \\pi j w_i (\\sqrt{1-l^2-m^2}-1)}}{\\sqrt{1-l^2-m^2}} e^{-2 \\pi j (ul+vm)} dl dm

    If images constructed from slices in w are added after applying a w-dependent image plane correction, the w term will be corrected.

    :param vis: Visibility to be predicted
    :param model: model image
    :return: resulting visibility (in place works)
    """

    assert isinstance(vis, Visibility), vis
    assert image_is_canonical(model)

    vis.data['vis'][...] = 0.0

    log.debug("predict_wstack_single: predicting using single w slice")

    # We might want to do wprojection so we remove the average w
    w_average = numpy.average(vis.w)
    if remove:
        vis.data['uvw'][..., 2] -= w_average
    tempvis = copy_visibility(vis)

    # Calculate w beam and apply to the model. The imaginary part is not needed
    workimage = copy_image(model)
    w_beam = create_w_term_like(model, w_average, vis.phasecentre)

    # Do the real part
    workimage.data = w_beam.data.real * model.data
    vis = predict_2d(vis, workimage, gcfcf=gcfcf, **kwargs)

    # and now the imaginary part
    workimage.data = w_beam.data.imag * model.data
    tempvis = predict_2d(tempvis, workimage, gcfcf=gcfcf, **kwargs)
    vis.data['vis'] -= 1j * tempvis.data['vis']

    if remove:
        vis.data['uvw'][..., 2] += w_average

    return vis
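
An illustrative numpy sketch of the w beam applied above: the phase screen exp(-2*pi*j*w*(sqrt(1-l^2-m^2)-1)) is split into real and imaginary parts so that two real-image predictions can be combined as vis_re - 1j*vis_im. The field of view and w value are made up:

import numpy

l, m = numpy.meshgrid(numpy.linspace(-0.01, 0.01, 64),
                      numpy.linspace(-0.01, 0.01, 64))
w_average = 100.0                           # wavelengths
phase = -2.0 * numpy.pi * w_average * (numpy.sqrt(1.0 - l**2 - m**2) - 1.0)
w_beam = numpy.exp(1j * phase)
# The model image would be multiplied by w_beam.real and w_beam.imag in turn
print(w_beam.real.max(), numpy.abs(w_beam.imag).max())
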
Example #6
def invert_wstack_single(vis: Visibility,
                         im: Image,
                         dopsf,
                         normalize=True,
                         remove=True,
                         gcfcf=None,
                         **kwargs) -> (Image, numpy.ndarray):
    """Process single w slice
    
    The w-stacking or w-slicing approach is to partition the visibility data by slices in w. The measurement equation is
    approximated as:

    .. math::

        V(u,v,w) = \\sum_i \\int \\frac{I(l,m) e^{-2 \\pi j w_i (\\sqrt{1-l^2-m^2}-1)}}{\\sqrt{1-l^2-m^2}} e^{-2 \\pi j (ul+vm)} dl dm

    If images constructed from slices in w are added after applying a w-dependent image plane correction, the w term will be corrected.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :returns: image, sum of weights
    """
    assert image_is_canonical(im)

    log.debug("invert_wstack_single: predicting using single w slice")

    kwargs['imaginary'] = True

    assert isinstance(vis, Visibility), vis

    # We might want to do wprojection so we remove the average w
    w_average = numpy.average(vis.w)
    if remove:
        vis.data['uvw'][..., 2] -= w_average

    reWorkimage, sumwt, imWorkimage = invert_2d(vis,
                                                im,
                                                dopsf,
                                                normalize=normalize,
                                                gcfcf=gcfcf,
                                                **kwargs)

    if remove:
        vis.data['uvw'][..., 2] += w_average

    # Calculate the w beam and apply the w-dependent image plane correction, combining the real and imaginary dirty images
    w_beam = create_w_term_like(im, w_average, vis.phasecentre)
    reWorkimage.data = w_beam.data.real * reWorkimage.data - w_beam.data.imag * imWorkimage.data

    return reWorkimage, sumwt
Example #7
def get_polarisation_map(vis: Visibility, im: Image = None):
    """ Get the mapping of visibility polarisations to image polarisations

    """
    assert image_is_canonical(im)

    if vis.polarisation_frame == im.polarisation_frame:
        if vis.polarisation_frame == PolarisationFrame('stokesI'):
            return "stokesI->stokesI", lambda pol: 0
        elif vis.polarisation_frame == PolarisationFrame('stokesIQUV'):
            return "stokesIQUV->stokesIQUV", lambda pol: pol

    return "unknown", lambda pol: pol
Example #8
def image_channel_iter(im: Image, subimages=1) -> collections.abc.Iterable:
    """Create a image_channel_iter generator, returning images

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved

    To update the image in place::

        for r in image_channel_iter(im, subimages=nchan):
            r.data[...] = numpy.sqrt(r.data[...])

    :param im: Image
    :param subimages: Number of subimages
    :returns: Generator of images

    See also
        :py:func:`rascil.processing_components.image.image_gather_channels`
        :py:func:`rascil.processing_components.image.image_scatter_channels`
    """

    assert image_is_canonical(im)

    nchan, npol, ny, nx = im.shape

    assert subimages <= nchan, "More subimages %d than channels %d" % (
        subimages, nchan)
    step = nchan // subimages
    channels = numpy.array(range(0, nchan, step), dtype='int')
    assert len(
        channels
    ) == subimages, "subimages %d does not match length of channels %d" % (
        subimages, len(channels))

    for i, channel in enumerate(channels):
        if i + 1 < len(channels):
            channel_max = channels[i + 1]
        else:
            channel_max = nchan

        # Adjust WCS
        wcs = im.wcs.deepcopy()
        wcs.wcs.crpix[3] -= channel

        # Yield image from slice (reference!)
        yield create_image_from_array(im.data[channel:channel_max, ...], wcs,
                                      im.polarisation_frame)
Example #9
def image_scatter_channels(im: Image, subimages=None) -> List[Image]:
    """Scatter an image into a list of subimages using the channels

    :param im: Image
    :param subimages: Number of subimages (defaults to the number of channels)
    :return: list of subimages

    See also
        :py:func:`rascil.processing_components.image.iterators.image_channel_iter`
    """

    assert image_is_canonical(im)

    image_list = list()
    if subimages is None:
        subimages = im.shape[0]

    for slab in image_channel_iter(im, subimages=subimages):
        image_list.append(slab)

    assert len(image_list) == subimages, "Too many subimages scattered"

    return image_list
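
A sketch of the intended round trip with image_gather_channels, assuming cube is an existing canonical [nchan, npol, ny, nx] Image:

import numpy

subimages = image_scatter_channels(cube)        # one subimage per channel
recovered = image_gather_channels(subimages)    # template image built internally
assert numpy.allclose(recovered.data, cube.data)
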
Example #10
    def predict_ng(bvis: BlockVisibility, model: Image,
                   **kwargs) -> BlockVisibility:
        """ Predict using convolutional degridding.
        
        Nifty-gridder version. https://gitlab.mpcdf.mpg.de/ift/nifty_gridder
    
        In the imaging and pipeline workflows, this may be invoked using context='ng'.

        :param bvis: BlockVisibility to be predicted
        :param model: model image
        :return: resulting BlockVisibility (in place works)
        """

        assert isinstance(bvis, BlockVisibility), bvis

        if model is None:
            return bvis

        assert image_is_canonical(model)

        nthreads = get_parameter(kwargs, "threads", 4)
        epsilon = get_parameter(kwargs, "epsilon", 1e-12)
        do_wstacking = get_parameter(kwargs, "do_wstacking", True)
        verbosity = get_parameter(kwargs, "verbosity", 0)

        newbvis = copy_visibility(bvis, zero=True)

        # Extracting data from BlockVisibility
        freq = bvis.frequency  # frequency, Hz
        nrows, nants, _, vnchan, vnpol = bvis.vis.shape

        uvw = newbvis.data['uvw'].reshape([nrows * nants * nants, 3])
        vist = numpy.zeros([vnpol, vnchan, nants * nants * nrows],
                           dtype='complex')

        # Get the image properties
        m_nchan, m_npol, ny, nx = model.data.shape
        # Check if the number of frequency channels matches in bvis and a model
        #        assert (m_nchan == v_nchan)
        assert (m_npol == vnpol)

        fuvw = uvw.copy()
        # We need to flip the u and w axes. The flip in w is equivalent to the conjugation of the
        # convolution function grid_visibility to griddata
        fuvw[:, 0] *= -1.0
        fuvw[:, 2] *= -1.0

        # Find out the image size/resolution
        pixsize = numpy.abs(numpy.radians(model.wcs.wcs.cdelt[0]))

        # Make de-gridding over a frequency range and pol fields
        vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(
            freq, 0)[0]).astype('int')

        mfs = m_nchan == 1

        if mfs:
            for vpol in range(vnpol):
                vist[vpol, :, :] = ng.dirty2ms(
                    fuvw.astype(numpy.float64),
                    bvis.frequency.astype(numpy.float64),
                    model.data[0, vpol, :, :].T.astype(numpy.float64),
                    pixsize_x=pixsize,
                    pixsize_y=pixsize,
                    epsilon=epsilon,
                    do_wstacking=do_wstacking,
                    nthreads=nthreads,
                    verbosity=verbosity).T

        else:
            for vpol in range(vnpol):
                for vchan in range(vnchan):
                    imchan = vis_to_im[vchan]
                    vist[vpol, vchan, :] = ng.dirty2ms(
                        fuvw.astype(numpy.float64),
                        numpy.array(freq[vchan:vchan + 1]).astype(
                            numpy.float64),
                        model.data[imchan, vpol, :, :].T.astype(numpy.float64),
                        pixsize_x=pixsize,
                        pixsize_y=pixsize,
                        epsilon=epsilon,
                        do_wstacking=do_wstacking,
                        nthreads=nthreads,
                        verbosity=verbosity)[:, 0]

        vis = convert_pol_frame(vist.T,
                                model.polarisation_frame,
                                bvis.polarisation_frame,
                                polaxis=2)

        newbvis.data['vis'] = vis.reshape([nrows, nants, nants, vnchan, vnpol])

        # Now we can shift the visibility from the image frame to the original visibility frame
        return shift_vis_to_image(newbvis, model, tangent=True, inverse=True)
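
The frequency-to-channel lookup used above can be illustrated with a hand-built one-axis spectral WCS (the reference frequency and channel width are invented for the example):

import numpy
from astropy.wcs import WCS

wcs = WCS(naxis=1)
wcs.wcs.ctype = ['FREQ']
wcs.wcs.crval = [1.0e8]                     # reference frequency, Hz
wcs.wcs.cdelt = [1.0e6]                     # channel width, Hz
wcs.wcs.crpix = [1.0]                       # FITS convention: 1-relative
freq = numpy.array([1.0e8, 1.05e8, 1.1e8])
vis_to_im = numpy.round(wcs.wcs_world2pix(freq, 0)[0]).astype('int')
print(vis_to_im)                            # [ 0  5 10]
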
Example #11
    def invert_ng(bvis: BlockVisibility,
                  model: Image,
                  dopsf: bool = False,
                  normalize: bool = True,
                  **kwargs) -> (Image, numpy.ndarray):
        """ Invert using nifty-gridder module
        
        https://gitlab.mpcdf.mpg.de/ift/nifty_gridder
    
        Use the image im as a template. Do PSF in a separate call.

        In the imaging and pipeline workflows, this may be invoked using context='ng'.

        :param dopsf: Make the PSF instead of the dirty image
        :param bvis: BlockVisibility to be inverted
        :param model: Model image used as a template (not changed)
        :param normalize: Normalize by the sum of weights (True)
        :return: (resulting image, sum of the weights for each frequency and polarization)
    
        """
        assert image_is_canonical(model)

        assert isinstance(bvis, BlockVisibility), bvis

        im = copy_image(model)

        nthreads = get_parameter(kwargs, "threads", 4)
        epsilon = get_parameter(kwargs, "epsilon", 1e-12)
        do_wstacking = get_parameter(kwargs, "do_wstacking", True)
        verbosity = get_parameter(kwargs, "verbosity", 0)

        sbvis = copy_visibility(bvis)
        sbvis = shift_vis_to_image(sbvis, im, tangent=True, inverse=False)

        freq = sbvis.frequency  # frequency, Hz

        nrows, nants, _, vnchan, vnpol = sbvis.vis.shape
        # if dopsf:
        #     sbvis = fill_vis_for_psf(sbvis)

        ms = sbvis.vis.reshape([nrows * nants * nants, vnchan, vnpol])
        ms = convert_pol_frame(ms,
                               bvis.polarisation_frame,
                               im.polarisation_frame,
                               polaxis=2)

        uvw = sbvis.uvw.reshape([nrows * nants * nants, 3])
        wgt = sbvis.flagged_imaging_weight.reshape(
            [nrows * nants * nants, vnchan, vnpol])

        if epsilon > 5.0e-6:
            ms = ms.astype("c8")
            wgt = wgt.astype("f4")

        # Find out the image size/resolution
        npixdirty = im.nwidth
        pixsize = numpy.abs(numpy.radians(im.wcs.wcs.cdelt[0]))

        fuvw = uvw.copy()
        # We need to flip the u and w axes.
        fuvw[:, 0] *= -1.0
        fuvw[:, 2] *= -1.0

        nchan, npol, ny, nx = im.shape
        im.data[...] = 0.0
        sumwt = numpy.zeros([nchan, npol])

        # There's a latent problem here with the weights.
        # wgt = numpy.real(convert_pol_frame(wgt, bvis.polarisation_frame, im.polarisation_frame, polaxis=2))

        # Set up the conversion from visibility channels to image channels
        vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(
            freq, 0)[0]).astype('int')

        # Nifty gridder likes to receive contiguous arrays so we transpose
        # at the beginning

        mfs = nchan == 1
        if dopsf:

            mst = ms.T
            mst[...] = 0.0
            mst[0, ...] = 1.0
            wgtt = wgt.T

            if mfs:
                dirty = ng.ms2dirty(fuvw.astype(numpy.float64),
                                    bvis.frequency.astype(numpy.float64),
                                    numpy.ascontiguousarray(mst[0, :, :].T),
                                    numpy.ascontiguousarray(wgtt[0, :, :].T),
                                    npixdirty,
                                    npixdirty,
                                    pixsize,
                                    pixsize,
                                    epsilon,
                                    do_wstacking=do_wstacking,
                                    nthreads=nthreads,
                                    verbosity=verbosity)
                sumwt[0, :] += numpy.sum(wgtt[0, 0, :].T, axis=0)
                im.data[0, :] += dirty.T
            else:
                for vchan in range(vnchan):
                    ichan = vis_to_im[vchan]
                    frequency = numpy.array(freq[vchan:vchan + 1]).astype(
                        numpy.float64)
                    dirty = ng.ms2dirty(
                        fuvw.astype(numpy.float64),
                        frequency.astype(numpy.float64),
                        numpy.ascontiguousarray(mst[0,
                                                    vchan, :][...,
                                                              numpy.newaxis]),
                        numpy.ascontiguousarray(wgtt[0,
                                                     vchan, :][...,
                                                               numpy.newaxis]),
                        npixdirty,
                        npixdirty,
                        pixsize,
                        pixsize,
                        epsilon,
                        do_wstacking=do_wstacking,
                        nthreads=nthreads,
                        verbosity=verbosity)
                    sumwt[ichan, :] += numpy.sum(wgtt[0, vchan, :].T, axis=0)
                    im.data[ichan, :] += dirty.T
        else:
            mst = ms.T
            wgtt = wgt.T
            for pol in range(npol):
                if mfs:
                    dirty = ng.ms2dirty(
                        fuvw.astype(numpy.float64),
                        bvis.frequency.astype(numpy.float64),
                        numpy.ascontiguousarray(mst[pol, :, :].T),
                        numpy.ascontiguousarray(wgtt[pol, :, :].T),
                        npixdirty,
                        npixdirty,
                        pixsize,
                        pixsize,
                        epsilon,
                        do_wstacking=do_wstacking,
                        nthreads=nthreads,
                        verbosity=verbosity)
                    sumwt[0, pol] += numpy.sum(wgtt[pol, 0, :].T, axis=0)
                    im.data[0, pol] += dirty.T
                else:
                    for vchan in range(vnchan):
                        ichan = vis_to_im[vchan]
                        frequency = numpy.array(freq[vchan:vchan + 1]).astype(
                            numpy.float64)
                        dirty = ng.ms2dirty(fuvw.astype(numpy.float64),
                                            frequency.astype(numpy.float64),
                                            numpy.ascontiguousarray(
                                                mst[pol,
                                                    vchan, :][...,
                                                              numpy.newaxis]),
                                            numpy.ascontiguousarray(
                                                wgtt[pol,
                                                     vchan, :][...,
                                                               numpy.newaxis]),
                                            npixdirty,
                                            npixdirty,
                                            pixsize,
                                            pixsize,
                                            epsilon,
                                            do_wstacking=do_wstacking,
                                            nthreads=nthreads,
                                            verbosity=verbosity)
                        sumwt[ichan, pol] += numpy.sum(wgtt[pol, vchan, :].T,
                                                       axis=0)
                        im.data[ichan, pol] += dirty.T

        if normalize:
            im = normalize_sumwt(im, sumwt)

        return im, sumwt
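
Hypothetical usage, assuming bvis is a BlockVisibility and model a matching canonical Image; the dirty image and the PSF are made in separate calls:

dirty, sumwt = invert_ng(bvis, model, dopsf=False, normalize=True)
psf, sumwt = invert_ng(bvis, model, dopsf=True, normalize=True)
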
Example #12
def deconvolve_cube(dirty: Image,
                    psf: Image,
                    prefix='',
                    **kwargs) -> (Image, Image):
    """ Clean using a variety of algorithms
    
    The algorithms available are:
    
    hogbom: Hogbom CLEAN See: Hogbom CLEAN A&A Suppl, 15, 417, (1974)

    hogbom-complex: Complex Hogbom CLEAN of stokesIQUV image
    
    msclean: MultiScale CLEAN See: Cornwell, T.J., Multiscale CLEAN (IEEE Journal of Selected Topics in Sig Proc,
    2008 vol. 2 pp. 793-801)

    mfsmsclean, msmfsclean, mmclean: MultiScale Multi-Frequency See: U. Rau and T. J. Cornwell,
    “A multi-scale multi-frequency deconvolution algorithm for synthesis imaging in radio interferometry,” A&A 532,
    A71 (2011).
    
    For example::
    
        comp, residual = deconvolve_cube(dirty, psf, niter=1000, gain=0.7, algorithm='msclean',
                                         scales=[0, 3, 10, 30], threshold=0.01)
                                         
    For the MFS clean, the psf must have number of channels >= 2 * nmoment
    
    :param dirty: Image dirty image
    :param psf: Image Point Spread Function
    :param window_shape: Window shape: 'quarter'|'no_edge'|None (default); defines the region to be cleaned
    :param mask: Window in the form of an image, overrides window_shape
    :param algorithm: Cleaning algorithm: 'msclean'|'hogbom'|'mfsmsclean'
    :param gain: loop gain (float) 0.7
    :param threshold: Clean threshold (0.0)
    :param fractional_threshold: Fractional threshold (0.01)
    :param scales: Scales (in pixels) for multiscale ([0, 3, 10, 30])
    :param nmoment: Number of frequency moments (default 3)
    :param findpeak: Method of finding peak in mfsclean: 'Algorithm1'|'ASKAPSoft'|'CASA'|'RASCIL', Default is RASCIL.
    :return: component image, residual image

    See also
        :py:func:`rascil.processing_components.arrays.cleaners.hogbom`
        :py:func:`rascil.processing_components.arrays.cleaners.hogbom_complex`
        :py:func:`rascil.processing_components.arrays.cleaners.msclean`
        :py:func:`rascil.processing_components.arrays.cleaners.msmfsclean`

    """

    assert isinstance(dirty, Image), dirty
    assert image_is_canonical(dirty)
    assert isinstance(psf, Image), psf
    assert image_is_canonical(psf)

    window_shape = get_parameter(kwargs, 'window_shape', None)
    if window_shape == 'quarter':
        log.info("deconvolve_cube %s: window is inner quarter" % prefix)
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = numpy.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info(
            'deconvolve_cube %s: Cleaning inner quarter of each sky plane' %
            prefix)
    elif window_shape == 'no_edge':
        edge = get_parameter(kwargs, 'window_edge', 16)
        nx = dirty.shape[3]
        ny = dirty.shape[2]
        window = numpy.zeros_like(dirty.data)
        window[..., (edge + 1):(ny - edge), (edge + 1):(nx - edge)] = 1.0
        log.info(
            'deconvolve_cube %s: Window omits %d-pixel edge of each sky plane'
            % (prefix, edge))
    elif window_shape is None:
        log.info("deconvolve_cube %s: Cleaning entire image" % prefix)
        window = None
    else:
        raise ValueError("Window shape %s is not recognized" % window_shape)

    mask = get_parameter(kwargs, 'mask', None)
    if isinstance(mask, Image):
        if window is not None:
            log.warning(
                'deconvolve_cube %s: Overriding window_shape with mask image' %
                (prefix))
        window = mask.data

    psf_support = get_parameter(kwargs, 'psf_support',
                                max(dirty.shape[2] // 2, dirty.shape[3] // 2))
    if (psf_support <= psf.shape[2] // 2) and (
        (psf_support <= psf.shape[3] // 2)):
        centre = [psf.shape[2] // 2, psf.shape[3] // 2]
        psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] +
                                                            psf_support),
                            (centre[1] - psf_support):(centre[1] +
                                                       psf_support)]
        log.info('deconvolve_cube %s: PSF support = +/- %d pixels' %
                 (prefix, psf_support))
        log.info('deconvolve_cube %s: PSF shape %s' %
                 (prefix, str(psf.data.shape)))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')

    if algorithm == 'msclean':
        log.info(
            "deconvolve_cube %s: Multi-scale clean of each polarisation and channel separately"
            % prefix)
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.01)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros_like(dirty.data)
        residual_array = numpy.zeros_like(dirty.data)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info(
                        "deconvolve_cube %s: Processing pol %d, channel %d" %
                        (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    None, gain, thresh, niter, scales, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    window[channel, pol, :, :], gain, thresh, niter, scales, fracthresh,
                                    prefix)
                else:
                    log.info(
                        "deconvolve_cube %s: Skipping pol %d, channel %d" %
                        (prefix, pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 dirty.polarisation_frame)

    elif algorithm == 'msmfsclean' or algorithm == 'mfsmsclean' or algorithm == 'mmclean':
        findpeak = get_parameter(kwargs, "findpeak", 'RASCIL')

        log.info(
            "deconvolve_cube %s: Multi-scale multi-frequency clean of each polarisation separately"
            % prefix)
        nmoment = get_parameter(kwargs, "nmoment", 3)
        assert nmoment >= 1, "Number of frequency moments must be greater than or equal to one"
        nchan = dirty.shape[0]
        assert nchan > 2 * (nmoment - 1), \
            "Require nchan %d > 2 * (nmoment %d - 1)" % (nchan, nmoment)
        dirty_taylor = calculate_image_frequency_moments(dirty,
                                                         nmoment=nmoment)
        if nmoment > 1:
            psf_taylor = calculate_image_frequency_moments(psf,
                                                           nmoment=2 * nmoment)
        else:
            psf_taylor = calculate_image_frequency_moments(psf, nmoment=1)
        psf_peak = numpy.max(psf_taylor.data)
        dirty_taylor.data /= psf_peak
        psf_taylor.data /= psf_peak
        log.info("deconvolve_cube %s: Shape of Dirty moments image %s" %
                 (prefix, str(dirty_taylor.shape)))
        log.info("deconvolve_cube %s: Shape of PSF moments image %s" %
                 (prefix, str(psf_taylor.shape)))
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty_taylor.data.shape)
        residual_array = numpy.zeros(dirty_taylor.data.shape)
        for pol in range(dirty_taylor.data.shape[1]):
            # Always use the Stokes I PSF
            if psf_taylor.data[0, 0, :, :].max():
                log.info("deconvolve_cube %s: Processing pol %d" %
                         (prefix, pol))
                if window is None:
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, 0, :, :],
                                   None, gain, thresh, niter, scales, fracthresh, findpeak, prefix)
                else:
                    log.info(
                        'deconvolve_cube %s: Clean window has %d valid pixels'
                        % (prefix, int(numpy.sum(window[0, pol]))))
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, 0, :, :],
                                   window[0, pol, :, :], gain, thresh, niter, scales, fracthresh,
                                   findpeak, prefix)
            else:
                log.info("deconvolve_cube %s: Skipping pol %d" % (prefix, pol))

        comp_image = create_image_from_array(comp_array, dirty_taylor.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array,
                                                 dirty_taylor.wcs,
                                                 dirty.polarisation_frame)

        return_moments = get_parameter(kwargs, "return_moments", False)
        if not return_moments:
            log.info("deconvolve_cube %s: calculating spectral cubes" % prefix)
            comp_image = calculate_image_from_frequency_moments(
                dirty, comp_image)
            residual_image = calculate_image_from_frequency_moments(
                dirty, residual_image)
        else:
            log.info("deconvolve_cube %s: constructed moment cubes" % prefix)

    elif algorithm == 'hogbom':
        log.info(
            "deconvolve_cube %s: Hogbom clean of each polarisation and channel separately"
            % prefix)
        gain = get_parameter(kwargs, 'gain', 0.1)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info(
                        "deconvolve_cube %s: Processing pol %d, channel %d" %
                        (prefix, pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   None, gain, thresh, niter, fracthresh, prefix)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   window[channel, pol, :, :], gain, thresh, niter, fracthresh, prefix)
                else:
                    log.info(
                        "deconvolve_cube %s: Skipping pol %d, channel %d" %
                        (prefix, pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs,
                                             dirty.polarisation_frame)
        residual_image = create_image_from_array(residual_array, dirty.wcs,
                                                 dirty.polarisation_frame)
    elif algorithm == 'hogbom-complex':
        log.info(
            "deconvolve_cube_complex: Hogbom-complex clean of each polarisation and channel separately"
        )
        gain = get_parameter(kwargs, 'gain', 0.1)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if pol == 0 or pol == 3:
                    if psf.data[channel, pol, :, :].max():
                        log.info(
                            "deconvolve_cube_complex: Processing pol %d, channel %d"
                            % (pol, channel))
                        if window is None:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info(
                            "deconvolve_cube_complex: Skipping pol %d, channel %d"
                            % (pol, channel))
                if pol == 1:
                    if psf.data[channel, 1:2, :, :].max():
                        log.info(
                            "deconvolve_cube_complex: Processing pol 1 and 2, channel %d"
                            % (channel))
                        if window is None:
                            comp_array[channel, 1, :, :], comp_array[
                                channel, 2, :, :], residual_array[
                                    channel, 1, :, :], residual_array[
                                        channel, 2, :, :] = hogbom_complex(
                                            dirty.data[channel, 1, :, :],
                                            dirty.data[channel, 2, :, :],
                                            psf.data[channel, 1, :, :],
                                            psf.data[channel, 2, :, :], None,
                                            gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, 1, :, :], comp_array[
                                channel, 2, :, :], residual_array[
                                    channel, 1, :, :], residual_array[
                                        channel, 2, :, :] = hogbom_complex(
                                            dirty.data[channel, 1, :, :],
                                            dirty.data[channel, 2, :, :],
                                            psf.data[channel, 1, :, :],
                                            psf.data[channel, 2, :, :],
                                            window[channel, pol, :, :], gain,
                                            thresh, niter, fracthresh)
                    else:
                        log.info(
                            "deconvolve_cube_complex: Skipping pol 1 and 2, channel %d"
                            % (channel))
                if pol == 2:
                    continue

        comp_image = create_image_from_array(
            comp_array,
            dirty.wcs,
            polarisation_frame=PolarisationFrame('stokesIQUV'))
        residual_image = create_image_from_array(
            residual_array,
            dirty.wcs,
            polarisation_frame=PolarisationFrame('stokesIQUV'))

    else:
        raise ValueError('deconvolve_cube %s: Unknown algorithm %s' %
                         (prefix, algorithm))

    return comp_image, residual_image
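
A hypothetical call using the mask window, assuming dirty and psf images and a clean_mask Image of the same shape already exist (the mask overrides window_shape):

comp, residual = deconvolve_cube(dirty, psf, niter=1000, gain=0.1,
                                 algorithm='hogbom', mask=clean_mask,
                                 threshold=0.001, fractional_threshold=0.01)
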
Example #13
def image_raster_iter(im: Image,
                      facets=1,
                      overlap=0,
                      taper='flat',
                      make_flat=False) -> collections.abc.Iterable:
    """Create an image_raster_iter generator, returning images, optionally with overlaps

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved. However make_flat
    creates a new set of images and thus reference semantics don't hold.

    To update the image in place::

        for r in image_raster_iter(im, facets=2):
            r.data[...] = numpy.sqrt(r.data[...])
            
    If the overlap is greater than zero, we choose to keep all images the same size so the
    outer ring of facets is ignored. So if facets=4 and overlap > 0 then the iterator returns
    (facets-2)**2 = 4 images.

    A taper is applied in the overlap regions. 'flat' implies a constant value, 'linear' is a ramp, and
    'quadratic' is parabolic at the ends.

    :param im: Image
    :param facets: Number of image partitions on each axis (default 1)
    :param overlap: overlap in pixels
    :param taper: method of tapering at the edges: 'flat' or 'linear' or 'quadratic' or 'tukey'
    :param make_flat: Make the flat images
    :returns: Generator of images

    See also
        :py:func:`rascil.processing_components.image.image_gather_facets`
        :py:func:`rascil.processing_components.image.image_scatter_facets`
    """

    assert image_is_canonical(im)

    nchan, npol, ny, nx = im.shape
    assert facets <= ny, "Cannot have more raster elements than pixels"
    assert facets <= nx, "Cannot have more raster elements than pixels"

    assert facets >= 1, "Facets cannot be zero or less"
    assert overlap >= 0, "Overlap must be zero or greater"

    if facets == 1:
        yield im
    else:

        assert overlap < (nx // facets), "Overlap in facets is too large"
        assert overlap < (ny // facets), "Overlap in facets is too large"

        # Step between facets
        sx = nx // facets + overlap
        sy = ny // facets + overlap

        # Size of each facet, including the overlap on both sides
        dx = sx + overlap
        dy = sy + overlap

        def taper_linear():
            t = numpy.ones(dx)
            ramp = numpy.arange(0, overlap).astype(float) / float(overlap)

            t[:overlap] = ramp
            t[(dx - overlap):dx] = 1.0 - ramp
            result = numpy.outer(t, t)

            return result

        def taper_quadratic():
            t = numpy.ones(dx)
            ramp = numpy.arange(0, overlap).astype(float) / float(overlap)

            quadratic_ramp = numpy.ones(overlap)
            quadratic_ramp[0:overlap // 2] = 2.0 * ramp[0:overlap // 2]**2
            quadratic_ramp[overlap //
                           2:] = 1 - 2.0 * ramp[overlap // 2:0:-1]**2

            t[:overlap] = quadratic_ramp
            t[(dx - overlap):dx] = 1.0 - quadratic_ramp

            result = numpy.outer(t, t)
            return result

        def taper_tukey():

            xs = numpy.arange(dx) / float(dx)
            r = 2 * overlap / dx
            t = [tukey_filter(x, r) for x in xs]

            result = numpy.outer(t, t)
            return result

        i = 0
        for fy in range(facets):
            y = ny // 2 + sy * (fy - facets // 2) - overlap // 2
            for fx in range(facets):
                x = nx // 2 + sx * (fx - facets // 2) - overlap // 2
                if (x >= 0) and (x + dx) <= nx and (y >= 0) and (y + dy) <= ny:
                    # Adjust WCS
                    wcs = im.wcs.deepcopy()
                    wcs.wcs.crpix[0] -= x
                    wcs.wcs.crpix[1] -= y
                    # yield image from slice (reference!)
                    subim = create_image_from_array(
                        im.data[..., y:y + dy, x:x + dx], wcs,
                        im.polarisation_frame)
                    if overlap > 0 and make_flat:
                        flat = create_empty_image_like(subim)
                        if taper == 'linear':
                            flat.data[..., :, :] = taper_linear()
                        elif taper == 'quadratic':
                            flat.data[..., :, :] = taper_quadratic()
                        elif taper == 'tukey':
                            flat.data[..., :, :] = taper_tukey()
                        else:
                            flat.data[...] = 1.0
                        yield flat
                    else:
                        yield subim
                    i += 1
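
An illustrative numpy sketch of the linear taper used for overlapping facets: in the overlap region the rising ramp of one facet and the falling ramp of its neighbour sum to one, so gathered facets blend smoothly (the facet size and overlap are made up):

import numpy

overlap, dx = 8, 32
ramp = numpy.arange(0, overlap).astype(float) / float(overlap)
t = numpy.ones(dx)
t[:overlap] = ramp
t[(dx - overlap):dx] = 1.0 - ramp
print(t[:overlap] + t[(dx - overlap):dx])   # ~1 across the overlap
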
Example #14
File: ng.py  Project: Yonhua/rascil
    def invert_ng(bvis: BlockVisibility,
                  model: Image,
                  dopsf: bool = False,
                  normalize: bool = True,
                  **kwargs) -> (Image, numpy.ndarray):
        """ Invert using nifty-gridder module
        
        https://gitlab.mpcdf.mpg.de/ift/nifty_gridder
    
        Use the image im as a template. Do PSF in a separate call.
    
        This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
        of this function. Any shifting needed is performed here.
    
        :param bvis: BlockVisibility to be inverted
        :param model: Model image used as a template (not changed)
        :param normalize: Normalize by the sum of weights (True)
        :return: (resulting image, sum of the weights for each frequency and polarization)
    
        """
        assert image_is_canonical(model)

        assert isinstance(bvis, BlockVisibility), bvis

        im = copy_image(model)

        nthreads = get_parameter(kwargs, "threads", 4)
        epsilon = get_parameter(kwargs, "epsilon", 1e-12)
        do_wstacking = get_parameter(kwargs, "do_wstacking", True)
        verbosity = get_parameter(kwargs, "verbosity", 0)

        sbvis = copy_visibility(bvis)
        sbvis = shift_vis_to_image(sbvis, im, tangent=True, inverse=False)

        vis = sbvis.vis

        freq = sbvis.frequency  # frequency, Hz

        nrows, nants, _, vnchan, vnpol = vis.shape
        uvw = sbvis.uvw.reshape([nrows * nants * nants, 3])
        ms = vis.reshape([nrows * nants * nants, vnchan, vnpol])
        wgt = sbvis.imaging_weight.reshape(
            [nrows * nants * nants, vnchan, vnpol])

        if dopsf:
            ms[...] = 1.0 + 0.0j

        if epsilon > 5.0e-6:
            ms = ms.astype("c8")
            wgt = wgt.astype("f4")

        # Find out the image size/resolution
        npixdirty = im.nwidth
        pixsize = numpy.abs(numpy.radians(im.wcs.wcs.cdelt[0]))

        fuvw = uvw.copy()
        # We need to flip the u and w axes.
        fuvw[:, 0] *= -1.0
        fuvw[:, 2] *= -1.0

        nchan, npol, ny, nx = im.shape
        im.data[...] = 0.0
        sumwt = numpy.zeros([nchan, npol])

        ms = convert_pol_frame(ms,
                               bvis.polarisation_frame,
                               im.polarisation_frame,
                               polaxis=2)
        # There's a latent problem here with the weights.
        # wgt = numpy.real(convert_pol_frame(wgt, bvis.polarisation_frame, im.polarisation_frame, polaxis=2))

        # Set up the conversion from visibility channels to image channels
        vis_to_im = numpy.round(model.wcs.sub([4]).wcs_world2pix(
            freq, 0)[0]).astype('int')
        for vchan in range(vnchan):
            ichan = vis_to_im[vchan]
            for pol in range(npol):
                # Nifty gridder likes to receive contiguous arrays
                ms_1d = numpy.array([
                    ms[row, vchan:vchan + 1, pol]
                    for row in range(nrows * nants * nants)
                ],
                                    dtype='complex')
                ms_1d.reshape([ms_1d.shape[0], 1])
                wgt_1d = numpy.array([
                    wgt[row, vchan:vchan + 1, pol]
                    for row in range(nrows * nants * nants)
                ])
                wgt_1d.reshape([wgt_1d.shape[0], 1])
                dirty = ng.ms2dirty(fuvw,
                                    freq[vchan:vchan + 1],
                                    ms_1d,
                                    wgt_1d,
                                    npixdirty,
                                    npixdirty,
                                    pixsize,
                                    pixsize,
                                    epsilon,
                                    do_wstacking=do_wstacking,
                                    nthreads=nthreads,
                                    verbosity=verbosity)
                sumwt[ichan, pol] += numpy.sum(wgt[:, vchan, pol])
                im.data[ichan, pol] += dirty.T

        if normalize:
            im = normalize_sumwt(im, sumwt)

        return im, sumwt
Example #15
def invert_timeslice_single(vis: Visibility,
                            im: Image,
                            dopsf,
                            normalize=True,
                            remove=True,
                            gcfcf=None,
                            **kwargs) -> (Image, numpy.ndarray):
    """Process single time slice

    Extracted for re-use in parallel version

    The w-term can be viewed as a time-variable distortion. Approximating the array as instantaneously
    co-planar, we have that w can be expressed in terms of u,v:

    .. math::
        w = a u + b v

    Transforming to a new coordinate system:

    .. math::

        l' = l + a (\\sqrt{1-l^2-m^2}-1)

    .. math::

        m' = m + b (\\sqrt{1-l^2-m^2}-1)

    Ignoring changes in the normalisation term, we have:

    .. math::

        V(u,v,w) = \\int \\frac{I(l',m')}{\\sqrt{1-l'^2-m'^2}} e^{-2 \\pi j (ul'+vm')} dl' dm'

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param gcfcf: (Grid correction function, convolution function)
    :param normalize: Normalize by the sum of weights (True)
    :returns: image, sum of weights
    """
    assert isinstance(vis, Visibility), vis
    assert image_is_canonical(im)

    uvw = vis.uvw
    vis, p, q = fit_uvwplane(vis, remove=remove)

    workimage, sumwt = invert_2d(vis,
                                 im,
                                 dopsf,
                                 normalize=normalize,
                                 gcfcf=gcfcf,
                                 **kwargs)
    # Work image is distorted. We describe the distortion by putting the obliquity parameters in
    # the wcs. The output image should be described as having zero obliquity parameters.

    if numpy.abs(p) > 1e-7 or numpy.abs(q) > 1e-7:
        # Note that this has to be zero relative in first element, one relative in second!!!!
        workimage.wcs.wcs.set_pv([(0, 1, -p), (0, 2, -q)])

        finalimage, footprint = reproject_image(workimage, im.wcs, im.shape)
        finalimage.data[footprint.data <= 0.0] = 0.0
        finalimage.wcs.wcs.set_pv([(0, 1, 0.0), (0, 2, 0.0)])

        if remove:
            vis.data['uvw'][...] = uvw

        return finalimage, sumwt
    else:
        if remove:
            vis.data['uvw'][...] = uvw

        return workimage, sumwt
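
An illustrative least-squares sketch of the plane fit referred to above: for an instantaneously coplanar array w ~= a*u + b*v, and the best-fit (a, b) follows from a linear solve (fit_uvwplane performs a fit of this kind); the u, v samples here are random, made-up values:

import numpy

rng = numpy.random.default_rng(1)
u, v = rng.normal(size=100), rng.normal(size=100)
a_true, b_true = 0.3, -0.1
w = a_true * u + b_true * v
A = numpy.vstack([u, v]).T
(a, b), *_ = numpy.linalg.lstsq(A, w, rcond=None)
print(a, b)                                 # ~0.3, ~-0.1
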
Example #16
def predict_timeslice_single(vis: Visibility,
                             model: Image,
                             predict=predict_2d,
                             remove=True,
                             gcfcf=None,
                             **kwargs) -> Visibility:
    """ Predict using a single time slices.

    This fits a single plane and corrects the image geometry.

    The w-term can be viewed as a time-variable distortion. Approximating the array as instantaneously
    co-planar, we have that w can be expressed in terms of u,v:

    .. math::
        w = a u + b v

    Transforming to a new coordinate system:

    .. math::

        l' = l + a (\\sqrt{1-l^2-m^2}-1)

    .. math::

        m' = m + b (\\sqrt{1-l^2-m^2}-1)

    Ignoring changes in the normalisation term, we have:

    .. math::

        V(u,v,w) = \\int \\frac{I(l',m')}{\\sqrt{1-l'^2-m'^2}} e^{-2 \\pi j (ul'+vm')} dl' dm'

    :param vis: Visibility to be predicted
    :param model: model image
    :param predict: Function used for the prediction (default predict_2d)
    :param remove: Remove fitted w (so that wprojection will do the right thing)
    :param gcfcf: (Grid correction function, convolution function)
    :return: resulting visibility (in place works)
    """
    assert image_is_canonical(model)

    assert isinstance(vis, Visibility), vis

    vis.data['vis'][...] = 0.0

    # Fit and remove best fitting plane for this slice
    uvw = vis.uvw
    avis, p, q = fit_uvwplane(vis, remove=remove)

    # We want to describe the work image as distorted. We describe the distortion by putting
    # the obliquity parameters in the wcs. The input model should be described as having
    # zero obliquity parameters.
    # Note that this has to be zero relative in first element, one relative in second!!!
    if numpy.abs(p) > 1e-7 or numpy.abs(q) > 1e-7:

        newwcs = model.wcs.deepcopy()
        newwcs.wcs.set_pv([(0, 1, -p), (0, 2, -q)])
        workimage, footprintimage = reproject_image(model,
                                                    newwcs,
                                                    shape=model.shape)
        workimage.data[footprintimage.data <= 0.0] = 0.0
        workimage.wcs.wcs.set_pv([(0, 1, -p), (0, 2, -q)])

        # Now we can do the predict
        vis = predict(avis, workimage, gcfcf=gcfcf, **kwargs)
    else:
        vis = predict(avis, model, gcfcf=gcfcf, **kwargs)

    if remove:
        avis.data['uvw'][...] = uvw

    return vis