def gather_image_iteration_results(results, template_model):
     result = create_empty_image_like(template_model)
     flat = create_empty_image_like(template_model)
     i = 0
     sumwt = numpy.zeros([template_model.nchan, template_model.npol])
     for dpatch in image_scatter_facets(result,
                                        facets=facets,
                                        overlap=overlap,
                                        taper=taper):
         assert i < len(
             results), "Too few results in gather_image_iteration_results"
         if results[i] is not None:
             assert len(results[i]) == 2, results[i]
             dpatch.data[...] += results[i][0].data[...]
             sumwt += results[i][1]
             i += 1
     flat = image_gather_facets(results,
                                flat,
                                facets=facets,
                                overlap=overlap,
                                taper=taper,
                                return_flat=True)
     result.data[flat.data > 0.5] /= flat.data[flat.data > 0.5]
     result.data[flat.data <= 0.5] = 0.0
     return result, sumwt
    def test_scatter_gather_facet(self):

        m31original = create_test_image(
            polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"

        for nraster in [1, 4, 8]:
            m31model = create_test_image(
                polarisation_frame=PolarisationFrame('stokesI'))
            image_list = image_scatter_facets(m31model, facets=nraster)
            for patch in image_list:
                assert patch.data.shape[3] == (m31model.data.shape[3] // nraster), \
                    "Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[3],
                                                                                (m31model.data.shape[3] // nraster))
                assert patch.data.shape[2] == (m31model.data.shape[2] // nraster), \
                    "Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[2],
                                                                                (m31model.data.shape[2] // nraster))
                patch.data[...] = 1.0
            m31reconstructed = create_empty_image_like(m31model)
            m31reconstructed = image_gather_facets(image_list,
                                                   m31reconstructed,
                                                   facets=nraster)
            flat = image_gather_facets(image_list,
                                       m31reconstructed,
                                       facets=nraster,
                                       return_flat=True)

            assert numpy.max(numpy.abs(
                flat.data)), "Flat is empty for %d" % nraster
            assert numpy.max(numpy.abs(
                m31reconstructed.data)), "Raster is empty for %d" % nraster
Example #3
def deconvolve_channel_list_serial_workflow(dirty_list, psf_list,
                                            model_imagelist, subimages,
                                            **kwargs):
    """Create a graph for deconvolution by channels, adding to the model

    Does deconvolution channel by channel.
    :param dirty_list: List of dirty images
    :param psf_list: List of PSFs, must be the size of a facet
    :param model_imagelist: Current model image (the deconvolved channels are added to it)
    :param subimages: Number of channel subimages to split into
    :param kwargs: Parameters for functions in components
    :return: Updated model image
    """
    def deconvolve_subimage(dirty, psf):
        assert isinstance(dirty, Image)
        assert isinstance(psf, Image)
        comp = deconvolve_cube(dirty, psf, **kwargs)
        return comp[0]

    def add_model(sum_model, model):
        assert isinstance(sum_model, Image)
        assert isinstance(model, Image)
        sum_model.data += model.data
        return sum_model

    output = create_empty_image_like(model_imagelist)
    dirty_lists = image_scatter_channels(dirty_list[0], subimages=subimages)
    results = [
        deconvolve_subimage(dirty, psf_list[0])
        for dirty in dirty_lists
    ]
    result = image_gather_channels(results, output, subimages=subimages)
    return add_model(result, model_imagelist)
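A minimal usage sketch, assuming my_dirty_list, my_psf_list and my_model are placeholder names for the outputs of an earlier invert step; the extra keyword arguments are typical deconvolution parameters passed through to deconvolve_cube:

# Hypothetical call: deconvolve in 8 channel groups and add the result to the model
updated_model = deconvolve_channel_list_serial_workflow(my_dirty_list, my_psf_list,
                                                        my_model, subimages=8,
                                                        niter=1000, gain=0.1,
                                                        threshold=0.001)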
Example #4
def create_low_test_beam(model: Image, use_local=True) -> Image:
    """Create a test power beam for LOW using an image from OSKAR

    This is not fit for anything except the most basic testing. It does not include any form of elevation/pa dependence.

    :param model: Template image
    :return: Image
    """
    beam = import_image_from_fits(
        rascil_path('data/models/SKA1_LOW_beam.fits'))

    # Scale the image cellsize to account for the difference in frequency. Eventually we will want to
    # use a frequency cube
    log.debug(
        "create_low_test_beam: LOW voltage pattern is defined at %.3f MHz" %
        (beam.wcs.wcs.crval[2] * 1e-6))

    nchan, npol, ny, nx = model.shape

    # We need to interpolate each frequency channel separately. The beam is assumed to just scale with
    # frequency.

    reprojected_beam = create_empty_image_like(model)

    for chan in range(nchan):

        model2dwcs = model.wcs.sub(2).deepcopy()
        model2dshape = [model.shape[2], model.shape[3]]
        beam2dwcs = beam.wcs.sub(2).deepcopy()

        # The frequency axis is the second to last in the beam
        frequency = model.wcs.sub(['spectral']).wcs_pix2world([chan], 0)[0]
        fscale = beam.wcs.wcs.crval[2] / frequency

        beam2dwcs.wcs.cdelt = fscale * beam.wcs.sub(2).wcs.cdelt
        beam2dwcs.wcs.crpix = beam.wcs.sub(2).wcs.crpix
        beam2dwcs.wcs.crval = model.wcs.sub(2).wcs.crval
        beam2dwcs.wcs.ctype = model.wcs.sub(2).wcs.ctype
        model2dwcs.wcs.crpix = [
            model.shape[2] // 2 + 1, model.shape[3] // 2 + 1
        ]

        beam2d = create_image_from_array(beam.data[0, 0, :, :], beam2dwcs,
                                         model.polarisation_frame)
        reprojected_beam2d, footprint = reproject_image(beam2d,
                                                        model2dwcs,
                                                        shape=model2dshape)
        assert numpy.max(
            footprint.data) > 0.0, "No overlap between beam and model"

        reprojected_beam2d.data[footprint.data <= 0.0] = 0.0
        for pol in range(npol):
            reprojected_beam.data[chan,
                                  pol, :, :] = reprojected_beam2d.data[:, :]

    set_pb_header(reprojected_beam, use_local=use_local)
    return reprojected_beam
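A minimal sketch of applying this beam to a model image, assuming model was created earlier (for example with create_image_from_visibility) and that copy_image is available:

beam = create_low_test_beam(model)
pb_applied = copy_image(model)
pb_applied.data *= beam.data   # attenuate the model by the LOW test beam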
 def invert_ignore_none(vis, model, gg):
     if vis is not None:
         return invert(vis,
                       model,
                       context=context,
                       dopsf=dopsf,
                       normalize=normalize,
                       gcfcf=gg,
                       **kwargs)
     else:
         return create_empty_image_like(model), numpy.zeros(
             [model.nchan, model.npol])
Example #6
 def gather_image_iteration_results(results, template_model):
     result = create_empty_image_like(template_model)
     i = 0
     sumwt = numpy.zeros([template_model.nchan, template_model.npol])
     for dpatch in image_scatter_facets(result, facets=facets):
         assert i < len(results), "Too few results in gather_image_iteration_results"
         if results[i] is not None:
             assert len(results[i]) == 2, results[i]
             dpatch.data[...] = results[i][0].data[...]
             sumwt += results[i][1]
             i += 1
     return result, sumwt
Example #7
def image_gradients(im: Image):
    """Calculate image first order gradients numerically

    Two images are returned: one with respect to x and one with respect to y
    
    Gradient units are (incoming unit)/pixel e.g. Jy/beam/pixel
    
    :param im: Image
    :return: Gradient images
    """
    assert isinstance(im, Image)

    nchan, npol, ny, nx = im.shape

    gradientx = create_empty_image_like(im)
    gradientx.data[..., :,
                   1:nx] = im.data[..., :, 1:nx] - im.data[..., :, 0:(nx - 1)]
    gradienty = create_empty_image_like(im)
    gradienty.data[...,
                   1:ny, :] = im.data[..., 1:ny, :] - im.data[...,
                                                              0:(ny - 1), :]

    return gradientx, gradienty
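The gradients are plain first-order differences along the last two axes. A self-contained numpy illustration of the same indexing on a bare 2D array (no RASCIL types assumed):

import numpy

arr = numpy.outer(numpy.ones(4), numpy.arange(4.0))   # linear ramp along x
gradx = numpy.zeros_like(arr)
grady = numpy.zeros_like(arr)
gradx[:, 1:] = arr[:, 1:] - arr[:, :-1]   # d/dx: constant 1.0 away from the first column
grady[1:, :] = arr[1:, :] - arr[:-1, :]   # d/dy: zero everywhere for this ramp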
    def test_scatter_gather_facet_overlap_taper(self):

        m31original = create_test_image(
            polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"

        for taper in ['linear', None]:
            for nraster, overlap in [(1, 0), (4, 8), (8, 8), (8, 16)]:
                m31model = create_test_image(
                    polarisation_frame=PolarisationFrame('stokesI'))
                image_list = image_scatter_facets(m31model,
                                                  facets=nraster,
                                                  overlap=overlap,
                                                  taper=taper)
                for patch in image_list:
                    assert patch.data.shape[3] == (2 * overlap + m31model.data.shape[3] // nraster), \
                        "Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[3],
                                                                                    (2 * overlap + m31model.data.shape[3] //
                                                                                     nraster))
                    assert patch.data.shape[2] == (2 * overlap + m31model.data.shape[2] // nraster), \
                        "Number of pixels in each patch: %d not as expected: %d" % (patch.data.shape[2],
                                                                                    (2 * overlap + m31model.data.shape[2] //
                                                                                     nraster))
                m31reconstructed = create_empty_image_like(m31model)
                m31reconstructed = image_gather_facets(image_list,
                                                       m31reconstructed,
                                                       facets=nraster,
                                                       overlap=overlap,
                                                       taper=taper)
                flat = image_gather_facets(image_list,
                                           m31reconstructed,
                                           facets=nraster,
                                           overlap=overlap,
                                           taper=taper,
                                           return_flat=True)
                if self.persist:
                    export_image_to_fits(
                        m31reconstructed,
                        "%s/test_image_gather_scatter_%dnraster_%doverlap_%s_reconstructed.fits"
                        % (self.dir, nraster, overlap, taper))
                if self.persist:
                    export_image_to_fits(
                        flat,
                        "%s/test_image_gather_scatter_%dnraster_%doverlap_%s_flat.fits"
                        % (self.dir, nraster, overlap, taper))

                assert numpy.max(numpy.abs(
                    flat.data)), "Flat is empty for %d" % nraster
                assert numpy.max(numpy.abs(
                    m31reconstructed.data)), "Raster is empty for %d" % nraster
Example #9
 def extract_psf(psf, facets):
     spsf = create_empty_image_like(psf)
     cx = spsf.shape[3] // 2
     cy = spsf.shape[2] // 2
     wx = spsf.shape[3] // facets
     wy = spsf.shape[2] // facets
     xbeg = cx - wx // 2
     xend = cx + wx // 2
     ybeg = cy - wy // 2
     yend = cy + wy // 2
     spsf.data = psf.data[..., ybeg:yend, xbeg:xend]
     spsf.wcs.wcs.crpix[0] -= xbeg
     spsf.wcs.wcs.crpix[1] -= ybeg
     return spsf
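extract_psf keeps the central 1/facets of the PSF on each axis and shifts crpix to match. The same central-window indexing in plain numpy, with an illustrative shape:

import numpy

psf_data = numpy.random.rand(1, 1, 256, 256)   # nchan, npol, ny, nx
facets = 8
cy, cx = psf_data.shape[2] // 2, psf_data.shape[3] // 2
wy, wx = psf_data.shape[2] // facets, psf_data.shape[3] // facets
central = psf_data[..., cy - wy // 2:cy + wy // 2, cx - wx // 2:cx + wx // 2]
print(central.shape)   # (1, 1, 32, 32)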
Example #10
def create_vp_generic(model,
                      pointingcentre=None,
                      diameter=25.0,
                      blockage=1.8,
                      use_local=True):
    """ Create a generic analytical model of the voltage pattern

    Feed legs are ignored

    :param model:
    :param diameter: Diameter of dish (m)
    :param blockage: Diameter of blockage
    :return:
    """

    beam = create_empty_image_like(model)
    beam.data = numpy.zeros(beam.data.shape, dtype='complex')

    nchan, npol, ny, nx = model.shape

    if pointingcentre is not None:
        cx, cy = pointingcentre.to_pixel(model.wcs, origin=0)
    else:
        cx, cy = beam.wcs.sub(2).wcs.crpix[0] - 1, beam.wcs.sub(
            2).wcs.crpix[1] - 1

    for chan in range(nchan):

        # The frequency axis is the second to last in the beam
        frequency = model.wcs.sub(['spectral']).wcs_pix2world([chan], 0)[0]
        wavelength = const.c.to('m s^-1').value / frequency

        d2r = numpy.pi / 180.0
        scale = d2r * numpy.abs(beam.wcs.sub(2).wcs.cdelt[0])
        xx, yy = numpy.meshgrid(scale * (range(nx) - cx),
                                scale * (range(ny) - cy))
        # Radius of each cell in radians
        rr = numpy.sqrt(xx**2 + yy**2)

        blockage_factor = (blockage / diameter)**2

        for pol in range(npol):
            reflector = ft_disk(rr * numpy.pi * diameter / wavelength)
            # Use a separate name so the blockage diameter is not overwritten on later iterations
            blockage_disk = ft_disk(rr * numpy.pi * blockage / wavelength)
            beam.data[chan, pol, ...] = reflector - blockage_factor * blockage_disk

    set_pb_header(beam, use_local=use_local)
    return beam
 def make_residual(dcal, tl, it):
     res = create_empty_image_like(dcal[0][0])
     for i, d in enumerate(dcal):
         assert numpy.max(numpy.abs(d[0].data)) > 0.0, "Residual subimage is zero"
         if tl[i].mask is None:
             res.data += d[0].data
         else:
             assert numpy.max(numpy.abs(tl[i].mask.data)) > 0.0, "Mask image is zero"
             res.data += d[0].data * tl[i].mask.data
             
     assert numpy.max(numpy.abs(res.data)) > 0.0, "Residual image is zero"
     # import matplotlib.pyplot as plt
     # from rascil.processing_components.image import show_image
     # show_image(res, title='MPCCAL residual image, iteration %d' % it)
     # plt.show()
     return res
def sum_invert_results(image_list, normalize=True):
    """ Sum a set of invert results with appropriate weighting

    :param image_list: List of [image, sum weights] pairs
    :return: image, sum of weights
    """
    if len(image_list) == 1:
        return image_list[0]

    im = create_empty_image_like(image_list[0][0])
    sumwt = image_list[0][1].copy()
    sumwt *= 0.0

    for i, arg in enumerate(image_list):
        if arg is not None:
            im.data += arg[1][..., numpy.newaxis, numpy.newaxis] * arg[0].data
            sumwt += arg[1]

    if normalize:
        im = normalize_sumwt(im, sumwt)
    return im, sumwt
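Each image is first multiplied by its own sum-of-weights; the trailing numpy.newaxis axes broadcast the (nchan, npol) weights over every pixel. A rough numpy-only illustration with made-up shapes:

import numpy

nchan, npol, ny, nx = 2, 1, 8, 8
images = [numpy.ones([nchan, npol, ny, nx]), 2.0 * numpy.ones([nchan, npol, ny, nx])]
sumwts = [numpy.full([nchan, npol], 3.0), numpy.full([nchan, npol], 1.0)]

acc = numpy.zeros([nchan, npol, ny, nx])
total = numpy.zeros([nchan, npol])
for data, wt in zip(images, sumwts):
    acc += wt[..., numpy.newaxis, numpy.newaxis] * data
    total += wt
acc /= total[..., numpy.newaxis, numpy.newaxis]   # weighted mean: 1.25 everywhere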
Example #13
def mosaic_pb(model, telescope, pointingcentres, use_local=True):
    """ Create a mosaic effective primary beam by adding primary beams for a set of pointing centres
    
    Note that the addition is root sum of squares
    
    :param model:  Template image
    :param telescope:
    :param pointingcentres: list of pointing centres
    :return:
    """
    assert isinstance(pointingcentres,
                      collections.abc.Iterable), "Need a list of pointing centres"
    sumpb = create_empty_image_like(model)
    for pc in pointingcentres:
        pb = create_pb(model,
                       telescope,
                       pointingcentre=pc,
                       use_local=use_local)
        sumpb.data += pb.data**2
    sumpb.data = numpy.sqrt(sumpb.data)
    return sumpb
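Because the combination is root-sum-of-squares, two pointings that each reach 1.0 at the same pixel combine to about 1.414 rather than 2. A one-line numpy check:

import numpy

pb1 = numpy.array([0.0, 0.5, 1.0])
pb2 = numpy.array([1.0, 0.5, 1.0])
print(numpy.sqrt(pb1**2 + pb2**2))   # [1.0, ~0.707, ~1.414]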
Example #14
    def test_raster_overlap(self):

        m31original = create_test_image(
            polarisation_frame=PolarisationFrame('stokesI'))
        assert numpy.max(numpy.abs(m31original.data)), "Original is empty"
        flat = create_empty_image_like(m31original)

        for nraster, overlap in [(1, 0), (1, 16), (4, 8), (4, 16), (8, 8),
                                 (16, 4), (9, 5)]:
            m31model = create_test_image(
                polarisation_frame=PolarisationFrame('stokesI'))
            for patch, flat_patch in zip(
                    image_raster_iter(m31model,
                                      facets=nraster,
                                      overlap=overlap),
                    image_raster_iter(flat, facets=nraster, overlap=overlap)):
                patch.data *= 2.0
                flat_patch.data[...] += 1.0

            assert numpy.max(numpy.abs(
                m31model.data)), "Raster is empty for %d" % nraster
    difference_image = copy_image(mpccal_restored)
    difference_image.data -= ical_restored.data

    print(qa_image(difference_image, context='MPCCAL - ICAL image'))
    show_image(difference_image,
               title='MPCCAL - ICAL image',
               components=ical_components)
    plt.show(block=block_plots)
    export_image_to_fits(
        difference_image,
        rascil_path(
            'test_results/low-sims-mpc-mpccal-ical-restored_%.1frmax.fits' %
            rmax))

    newscreen = create_empty_image_like(screen)
    gaintables = [sm.gaintable for sm in mpccal_skymodel]
    newscreen, weights = grid_gaintable_to_screen(block_vis, gaintables,
                                                  newscreen)
    export_image_to_fits(
        newscreen,
        rascil_path('test_results/low-sims-mpc-mpccal-screen_%.1frmax.fits' %
                    rmax))
    export_image_to_fits(
        weights,
        rascil_path(
            'test_results/low-sims-mpc-mpccal-screenweights_%.1frmax.fits' %
            rmax))
    print(qa_image(weights))
    print(qa_image(newscreen))
Example #16
def image_gather_facets(image_list: List[Image],
                        im: Image,
                        facets=1,
                        overlap=0,
                        taper=None,
                        return_flat=False):
    """Gather a list of subimages back into an image using the  image_raster_iterator

    If the overlap is greater than zero, we choose to keep all images the same size, so the
    outer ring of facets is ignored. So if facets=4 and overlap > 0 then the gather expects
    (facets-2)**2 = 4 images.

    To normalize the overlap we make a set of flats, gather them, and divide. The flat may optionally
    be returned instead of the result.

    :param image_list: List of subimages
    :param im: Output image
    :param facets: Number of image partitions on each axis (2)
    :param overlap: Overlap between neighbours in pixels
    :param taper: Taper at edges None or 'linear' or 'Tukey'
    :param return_flat: Return the flat
    :return: Gathered image, or the flat if return_flat is True

    See also
        :py:func:`rascil.processing_components.image.iterators.image_raster_iter`
    """
    out = create_empty_image_like(im)
    if overlap > 0:
        flat = create_empty_image_like(im)
        flat.data[...] = 1.0
        flats = [
            f for f in image_raster_iter(flat,
                                         facets=facets,
                                         overlap=overlap,
                                         taper=taper,
                                         make_flat=True)
        ]

        sum_flats = create_empty_image_like(im)

        if return_flat:
            i = 0
            for sum_flat_facet in image_raster_iter(sum_flats,
                                                    facets=facets,
                                                    overlap=overlap,
                                                    taper=taper):
                sum_flat_facet.data[...] += flats[i].data[...]
                i += 1

            return sum_flats
        else:
            i = 0
            for out_facet, sum_flat_facet in zip(
                    image_raster_iter(out,
                                      facets=facets,
                                      overlap=overlap,
                                      taper=taper),
                    image_raster_iter(sum_flats,
                                      facets=facets,
                                      overlap=overlap,
                                      taper=taper)):
                out_facet.data[...] += flats[i].data * image_list[i].data[...]
                sum_flat_facet.data[...] += flats[i].data[...]
                i += 1

            out.data[sum_flats.data > 0.0] /= sum_flats.data[
                sum_flats.data > 0.0]
            out.data[sum_flats.data <= 0.0] = 0.0

            return out
    else:
        flat = create_empty_image_like(im)
        flat.data[...] = 1.0

        if return_flat:
            return flat
        else:
            for i, facet in enumerate(
                    image_raster_iter(out,
                                      facets=facets,
                                      overlap=overlap,
                                      taper=taper)):
                facet.data[...] += image_list[i].data[...]

            return out
Example #17
def grid_gaintable_to_screen(vis,
                             gaintables,
                             screen,
                             height=3e5,
                             gaintable_slices=None,
                             scale=1.0,
                             **kwargs):
    """ Grid a gaintable to a screen image

    Screen axes are ['XX', 'YY', 'TIME', 'FREQ']

    The phases are just averaged per grid cell, no phase unwrapping is performed.

    :param vis:
    :param gaintables: input gaintables
    :param screen:
    :param height: Height (in m) of screen above telescope e.g. 3e5
    :param scale: Multiply the screen by this factor
    :return: gridded screen image, weights image
    """
    assert isinstance(vis, BlockVisibility)

    station_locations = vis.configuration.xyz

    nant = station_locations.shape[0]
    t2r = numpy.pi / 43200.0

    newscreen = create_empty_image_like(screen)
    weights = create_empty_image_like(screen)
    nchan, ntimes, ny, nx = screen.shape

    # The time in the Visibility is hour angle in seconds!
    number_no_weight = 0
    for gaintable in gaintables:
        for iha, rows in enumerate(
                gaintable_timeslice_iter(gaintable,
                                         gaintable_slices=gaintable_slices)):
            gt = create_gaintable_from_rows(gaintable, rows)
            ha = numpy.average(gt.time)

            pp = find_pierce_points(station_locations,
                                    (gt.phasecentre.ra.rad + t2r * ha) * u.rad,
                                    gt.phasecentre.dec,
                                    height=height,
                                    phasecentre=vis.phasecentre)
            scr = numpy.angle(gt.gain[0, :, 0, 0, 0])
            wt = gt.weight[0, :, 0, 0, 0]
            for ant in range(nant):
                pp0 = pp[ant][0:2]
                for freq in vis.frequency:
                    phase2tec = -freq / 8.44797245e9
                    worldloc = [pp0[0], pp0[1], ha, freq]
                    pixloc = newscreen.wcs.wcs_world2pix([worldloc],
                                                         0)[0].astype('int')
                    assert pixloc[0] >= 0
                    assert pixloc[0] < nx
                    assert pixloc[1] >= 0
                    assert pixloc[1] < ny
                    pixloc[3] = 0
                    newscreen.data[pixloc[3], pixloc[2], pixloc[1],
                                   pixloc[0]] += wt[ant] * phase2tec * scr[ant]
                    weights.data[pixloc[3], pixloc[2], pixloc[1],
                                 pixloc[0]] += wt[ant]
                    if wt[ant] == 0.0:
                        number_no_weight += 1
    if number_no_weight > 0:
        log.warning(
            "grid_gaintable_to_screen: %d pierce points are have no weight" %
            (number_no_weight))

    newscreen.data[weights.data > 0.0] = newscreen.data[
        weights.data > 0.0] / weights.data[weights.data > 0.0]

    return newscreen, weights
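The factor t2r = numpy.pi / 43200.0 converts an hour angle stored in seconds into radians (12 hours = 43200 s corresponds to pi radians). A quick check:

import numpy

t2r = numpy.pi / 43200.0
print(t2r * 3600.0)   # one hour of hour angle, about 0.2618 rad (15 degrees)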
def create_vpterm_convolutionfunction(im,
                                      make_vp=None,
                                      oversampling=8,
                                      support=6,
                                      use_aaf=False,
                                      maxsupport=512,
                                      pa=None,
                                      normalise=True):
    """ Fill voltage pattern kernel projection kernel into a GridData.
    
    The makes the convolution function for gridding polarised data with a voltage
    pattern.

    :param im: Image template
    :param make_vp: Function to make the voltage pattern model image (hint: use a partial)
    :param oversampling: Oversampling of the convolution function in uv space
    :return: griddata correction Image, griddata kernel as GridData
    """
    if oversampling % 2 == 0:
        log.info("Setting oversampling to next greatest odd number {}".format(
            oversampling))
        oversampling += 1

    d2r = numpy.pi / 180.0

    # We only need the griddata correction function for the PSWF so we make
    # it for the shape of the image
    nchan, npol, ony, onx = im.data.shape

    assert isinstance(im, Image)
    # Calculate the template convolution kernel.
    cf = create_convolutionfunction_from_image(im,
                                               oversampling=oversampling,
                                               support=support)

    cf_shape = list(cf.data.shape)
    cf.data = numpy.zeros(cf_shape).astype('complex')

    assert isinstance(oversampling, int)
    assert oversampling > 0

    nx = max(maxsupport, 2 * oversampling * support)
    ny = max(maxsupport, 2 * oversampling * support)

    qnx = nx // oversampling
    qny = ny // oversampling

    cf.data[...] = 0.0

    subim = copy_image(im)
    ccell = onx * numpy.abs(d2r * subim.wcs.wcs.cdelt[0]) / qnx

    subim.data = numpy.zeros([nchan, npol, qny, qnx])
    subim.wcs.wcs.cdelt[0] = -ccell / d2r
    subim.wcs.wcs.cdelt[1] = +ccell / d2r
    subim.wcs.wcs.crpix[0] = qnx // 2 + 1.0
    subim.wcs.wcs.crpix[1] = qny // 2 + 1.0

    vp = make_vp(subim)

    if pa is not None:
        rvp = convert_azelvp_to_radec(vp, subim, pa)
    else:
        rvp = convert_azelvp_to_radec(vp, subim, 0.0)

    if use_aaf:
        this_pswf_gcf, _ = create_pswf_convolutionfunction(subim,
                                                           oversampling=1,
                                                           support=6)
        rvp.data /= this_pswf_gcf.data

    # We might need to work with a larger image
    padded_shape = [nchan, npol, ny, nx]
    paddedplane = pad_image(rvp, padded_shape)
    paddedplane = fft_image(paddedplane)

    ycen, xcen = ny // 2, nx // 2
    for y in range(oversampling):
        ybeg = y + ycen + (support * oversampling) // 2 - oversampling // 2
        yend = y + ycen - (support * oversampling) // 2 - oversampling // 2
        # vv = range(ybeg, yend, -oversampling)
        for x in range(oversampling):
            xbeg = x + xcen + (support * oversampling) // 2 - oversampling // 2
            xend = x + xcen - (support * oversampling) // 2 - oversampling // 2

            # uu = range(xbeg, xend, -oversampling)
            cf.data[..., 0, y, x, :, :] = \
                paddedplane.data[..., ybeg:yend:-oversampling, xbeg:xend:-oversampling]

    if normalise:
        cf.data /= numpy.sum(
            numpy.real(cf.data[0, 0, 0, oversampling // 2,
                               oversampling // 2, :, :]))
    cf.data = numpy.conjugate(cf.data)

    if use_aaf:
        pswf_gcf, _ = create_pswf_convolutionfunction(im,
                                                      oversampling=1,
                                                      support=6)
    else:
        pswf_gcf = create_empty_image_like(im)
        pswf_gcf.data[...] = 1.0

    return pswf_gcf, cf
Example #19
    def ingest_visibility(self,
                          freq=None,
                          chan_width=None,
                          times=None,
                          add_errors=False,
                          block=True,
                          bandpass=False):
        if freq is None:
            freq = [1e8]
        if chan_width is None:
            chan_width = [1e6]
        if times is None:
            times = (numpy.pi / 12.0) * numpy.linspace(-3.0, 3.0, 5)

        lowcore = create_named_configuration('LOWBD2', rmax=750.0)
        frequency = numpy.array(freq)
        channel_bandwidth = numpy.array(chan_width)

        phasecentre = SkyCoord(ra=+180.0 * u.deg,
                               dec=-60.0 * u.deg,
                               frame='icrs',
                               equinox='J2000')
        if block:
            vt = create_blockvisibility(
                lowcore,
                times,
                frequency,
                channel_bandwidth=channel_bandwidth,
                weight=1.0,
                phasecentre=phasecentre,
                polarisation_frame=PolarisationFrame("stokesI"))
        else:
            vt = create_visibility(
                lowcore,
                times,
                frequency,
                channel_bandwidth=channel_bandwidth,
                weight=1.0,
                phasecentre=phasecentre,
                polarisation_frame=PolarisationFrame("stokesI"))
        cellsize = 0.001
        model = create_image_from_visibility(
            vt,
            npixel=self.npixel,
            cellsize=cellsize,
            npol=1,
            frequency=frequency,
            phasecentre=phasecentre,
            polarisation_frame=PolarisationFrame("stokesI"))
        nchan = len(self.frequency)
        flux = numpy.array(nchan * [[100.0]])
        facets = 4

        rpix = model.wcs.wcs.crpix - 1.0
        spacing_pixels = self.npixel // facets
        centers = [-1.5, -0.5, 0.5, 1.5]
        comps = list()
        for iy in centers:
            for ix in centers:
                p = int(round(rpix[0] + ix * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[0]))), \
                    int(round(rpix[1] + iy * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[1])))
                sc = pixel_to_skycoord(p[0], p[1], model.wcs, origin=1)
                comp = create_skycomponent(
                    direction=sc,
                    flux=flux,
                    frequency=frequency,
                    polarisation_frame=PolarisationFrame("stokesI"))
                comps.append(comp)
        dft_skycomponent_visibility(vt, comps)
        insert_skycomponent(model, comps)
        self.comps = comps
        self.model = copy_image(model)
        self.empty_model = create_empty_image_like(model)
        export_image_to_fits(
            model, '%s/test_pipeline_functions_model.fits' % (self.dir))

        if add_errors:
            # These will be the same for all calls
            numpy.random.seed(180555)
            gt = create_gaintable_from_blockvisibility(vt)
            gt = simulate_gaintable(gt, phase_error=1.0, amplitude_error=0.0)
            vt = apply_gaintable(vt, gt)

            if bandpass:
                bgt = create_gaintable_from_blockvisibility(vt, timeslice=1e5)
                bgt = simulate_gaintable(bgt,
                                         phase_error=0.01,
                                         amplitude_error=0.01,
                                         smooth_channels=4)
                vt = apply_gaintable(vt, bgt)

        return vt
Example #20
def image_raster_iter(im: Image,
                      facets=1,
                      overlap=0,
                      taper='flat',
                      make_flat=False) -> collections.abc.Iterable:
    """Create an image_raster_iter generator, returning images, optionally with overlaps

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved. However, make_flat
    creates a new set of images and thus reference semantics don't hold.

    To update the image in place::

        for r in image_raster_iter(im, facets=2):
            r.data[...] = numpy.sqrt(r.data[...])
            
    If the overlap is greater than zero, we choose to keep all images the same size, so the
    outer ring of facets is ignored. So if facets=4 and overlap > 0 then the iterator returns
    (facets-2)**2 = 4 images.
    
    A taper is applied in the overlap regions: 'flat' (or None) implies a constant value, 'linear' is a
    ramp, and 'quadratic' is parabolic at the ends.

    :param im: Image
    :param facets: Number of image partitions on each axis (2)
    :param overlap: overlap in pixels
    :param taper: method of tapering at the edges: 'flat' or 'linear' or 'quadratic' or 'tukey'
    :param make_flat: Make the flat images
    :returns: Generator of images

    See also
        :py:func:`rascil.processing_components.image.image_gather_facets`
        :py:func:`rascil.processing_components.image.image_scatter_facets`
    """

    assert image_is_canonical(im)

    nchan, npol, ny, nx = im.shape
    assert facets <= ny, "Cannot have more raster elements than pixels"
    assert facets <= nx, "Cannot have more raster elements than pixels"

    assert facets >= 1, "Facets cannot be zero or less"
    assert overlap >= 0, "Overlap must be zero or greater"

    if facets == 1:
        yield im
    else:

        assert overlap < (nx // facets), "Overlap in facets is too large"
        assert overlap < (ny // facets), "Overlap in facets is too large"

        # Step between facets
        sx = nx // facets + overlap
        sy = ny // facets + overlap

        # Size of facet
        dx = nx // facets + 2 * overlap
        dy = ny // facets + 2 * overlap

        def taper_linear():
            t = numpy.ones(dx)
            ramp = numpy.arange(0, overlap).astype(float) / float(overlap)

            t[:overlap] = ramp
            t[(dx - overlap):dx] = 1.0 - ramp
            result = numpy.outer(t, t)

            return result

        def taper_quadratic():
            t = numpy.ones(dx)
            ramp = numpy.arange(0, overlap).astype(float) / float(overlap)

            quadratic_ramp = numpy.ones(overlap)
            quadratic_ramp[0:overlap // 2] = 2.0 * ramp[0:overlap // 2]**2
            quadratic_ramp[overlap //
                           2:] = 1 - 2.0 * ramp[overlap // 2:0:-1]**2

            t[:overlap] = quadratic_ramp
            t[(dx - overlap):dx] = 1.0 - quadratic_ramp

            result = numpy.outer(t, t)
            return result

        def taper_tukey():

            xs = numpy.arange(dx) / float(dx)
            r = 2 * overlap / dx
            t = [tukey_filter(x, r) for x in xs]

            result = numpy.outer(t, t)
            return result

        i = 0
        for fy in range(facets):
            y = ny // 2 + sy * (fy - facets // 2) - overlap // 2
            for fx in range(facets):
                x = nx // 2 + sx * (fx - facets // 2) - overlap // 2
                if (x >= 0) and (x + dx) <= nx and (y >= 0) and (y + dy) <= ny:
                    # Adjust WCS
                    wcs = im.wcs.deepcopy()
                    wcs.wcs.crpix[0] -= x
                    wcs.wcs.crpix[1] -= y
                    # yield image from slice (reference!)
                    subim = create_image_from_array(
                        im.data[..., y:y + dy, x:x + dx], wcs,
                        im.polarisation_frame)
                    if overlap > 0 and make_flat:
                        flat = create_empty_image_like(subim)
                        if taper == 'linear':
                            flat.data[..., :, :] = taper_linear()
                        elif taper == 'quadratic':
                            flat.data[..., :, :] = taper_quadratic()
                        elif taper == 'tukey':
                            flat.data[..., :, :] = taper_tukey()
                        else:
                            flat.data[...] = 1.0
                        yield flat
                    else:
                        yield subim
                    i += 1
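The linear taper used by make_flat is a one-dimensional ramp of length overlap at each facet edge, turned into a 2D window with an outer product. A standalone numpy sketch with an arbitrary facet size and overlap:

import numpy

dx, overlap = 16, 4
t = numpy.ones(dx)
ramp = numpy.arange(0, overlap).astype(float) / float(overlap)
t[:overlap] = ramp                  # ramp up over the leading overlap region
t[(dx - overlap):dx] = 1.0 - ramp   # ramp down over the trailing overlap region
window = numpy.outer(t, t)          # 2D flat applied to each overlapping facet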
Example #21
def invert_list_serial_workflow(vis_list,
                                template_model_imagelist,
                                dopsf=False,
                                normalize=True,
                                facets=1,
                                vis_slices=1,
                                context='2d',
                                gcfcf=None,
                                **kwargs):
    """ Sum results from invert, iterating over the scattered image and vis_list

    :param vis_list: list of vis
    :param template_model_imagelist: list of template models
    :param dopsf: Make the PSF instead of the dirty image
    :param facets: Number of facets
    :param normalize: Normalize by sumwt
    :param vis_slices: Number of slices
    :param context: Imaging context
    :param gcfcf: tuple containing grid correction and convolution function
    :param kwargs: Parameters for functions in components
    :return: List of (image, sumwt) tuples, one per vis in vis_list

    For example::

        model_list = [create_image_from_visibility
            (v, npixel=npixel, cellsize=cellsize, polarisation_frame=pol_frame)
            for v in vis_list]

        dirty_list = invert_list_serial_workflow(vis_list, template_model_imagelist=model_list, context='wstack',
                                                    vis_slices=51)
        dirty, sumwt = dirty_list[centre]

   """

    if not isinstance(template_model_imagelist, collections.abc.Iterable):
        template_model_imagelist = [template_model_imagelist]

    c = imaging_context(context)
    vis_iter = c['vis_iterator']
    invert = c['invert']

    if facets % 2 == 0 or facets == 1:
        actual_number_facets = facets
    else:
        actual_number_facets = max(1, (facets - 1))

    def gather_image_iteration_results(results, template_model):
        result = create_empty_image_like(template_model)
        i = 0
        sumwt = numpy.zeros([template_model.nchan, template_model.npol])
        for dpatch in image_scatter_facets(result, facets=facets):
            assert i < len(
                results), "Too few results in gather_image_iteration_results"
            if results[i] is not None:
                assert len(results[i]) == 2, results[i]
                dpatch.data[...] = results[i][0].data[...]
                sumwt += results[i][1]
                i += 1
        return result, sumwt

    def invert_ignore_none(vis, model, gg):
        if vis is not None:

            return invert(vis,
                          model,
                          context=context,
                          dopsf=dopsf,
                          normalize=normalize,
                          gcfcf=gg,
                          **kwargs)
        else:
            return create_empty_image_like(model), numpy.zeros(
                [model.nchan, model.npol])

    # If we are not faceting, we only need a single gcf, made from the first template model
    if gcfcf is None and facets == 1:
        gcfcf = [create_pswf_convolutionfunction(template_model_imagelist[0])]

    # Loop over all vis_lists independently
    results_vislist = list()
    if facets == 1:
        for ivis, sub_vis_list in enumerate(vis_list):
            if len(gcfcf) > 1:
                g = gcfcf[ivis]
            else:
                g = gcfcf[0]
            # Iterate within each vis_list
            result_image = create_empty_image_like(
                template_model_imagelist[ivis])
            result_sumwt = numpy.zeros([
                template_model_imagelist[ivis].nchan,
                template_model_imagelist[ivis].npol
            ])
            for rows in vis_iter(sub_vis_list, vis_slices):
                row_vis = create_visibility_from_rows(sub_vis_list, rows)
                result = invert_ignore_none(row_vis,
                                            template_model_imagelist[ivis], g)
                if result is not None:
                    result_image.data += result[1][:, :, numpy.newaxis, numpy.
                                                   newaxis] * result[0].data
                    result_sumwt += result[1]
            result_image = normalize_sumwt(result_image, result_sumwt)
            results_vislist.append((result_image, result_sumwt))
    else:
        for ivis, sub_vis_list in enumerate(vis_list):
            # Create the graph to divide an image into facets. This is by reference.
            facet_lists = image_scatter_facets(template_model_imagelist[ivis],
                                               facets=facets)
            # Create the graph to divide the visibility into slices. This is by copy.
            sub_sub_vis_lists = visibility_scatter(sub_vis_list,
                                                   vis_iter,
                                                   vis_slices=vis_slices)

            # Iterate within each vis_list
            vis_results = list()
            for sub_sub_vis_list in sub_sub_vis_lists:
                facet_vis_results = list()
                for facet_list in facet_lists:
                    facet_vis_results.append(
                        invert_ignore_none(sub_sub_vis_list, facet_list, None))
                vis_results.append(
                    gather_image_iteration_results(
                        facet_vis_results, template_model_imagelist[ivis]))
            results_vislist.append(sum_invert_results(vis_results))

    return results_vislist
Example #22
 def sum_images(images):
     sum_image = create_empty_image_like(images[0][0])
     for im in images:
         sum_image.data += im[0].data
     return sum_image, images[0][1]
Example #23
def create_vp_generic_numeric(model,
                              pointingcentre=None,
                              diameter=15.0,
                              blockage=0.0,
                              taper='gaussian',
                              edge=0.03162278,
                              zernikes=None,
                              padding=4,
                              use_local=True,
                              rho=0.0,
                              diff=0.0):
    """
    Make an image like model and fill it with a numerically computed model of the voltage pattern
    
    The elements of the analytical model are:
    - dish, optionally blocked
    - Gaussian taper, default is -12dB at the edge
    - Offset to pointing centre (optional)
    - zernikes in a list of dictionaries. Each list element is of the form {"coeff":0.1, "noll":5}. See aotools for
    more details
    - Output image can be in RA, DEC coordinates or AZELGEO coordinates (the default). use_local=True means to use
    AZELGEO coordinates centered on 0deg 0deg.
    
    The dish is zero padded according to padding and FFT'ed to get the voltage pattern.
    
    :param model:
    :param pointingcentre: SkyCoord of desired pointing centre
    :param diameter: Diameter of dish in metres
    :param blockage: Blockage of dish in metres
    :param taper: 'gaussian' or None
    :param edge: Value of the taper at the edge of the dish (default corresponds to -12dB)
    :param zernikes: Zernikes to be applied as phase across the dish (see above)
    :param padding: Pad the image by this amount
    :param use_local: Use local frame (AZELGEO)?
    :return:
    """
    beam = create_empty_image_like(model)
    nchan, npol, ny, nx = beam.shape
    padded_shape = [nchan, npol, padding * ny, padding * nx]
    padded_beam = pad_image(beam, padded_shape)
    padded_beam.data = numpy.zeros(padded_beam.data.shape, dtype='complex')
    _, _, pny, pnx = padded_beam.shape

    xfr = fft_image(padded_beam)
    cx, cy = xfr.wcs.sub(2).wcs.crpix[0] - 1, xfr.wcs.sub(2).wcs.crpix[1] - 1

    for chan in range(nchan):

        # The frequency axis is the second to last in the beam
        frequency = xfr.wcs.sub(['spectral']).wcs_pix2world([chan], 0)[0]
        wavelength = const.c.to('m s^-1').value / frequency

        scalex = xfr.wcs.sub(2).wcs.cdelt[0] * wavelength
        scaley = xfr.wcs.sub(2).wcs.cdelt[1] * wavelength
        # xx, yy in metres
        xx, yy = numpy.meshgrid(scalex * (range(pnx) - cx),
                                scaley * (range(pny) - cy))

        # rr in metres
        rr = numpy.sqrt(xx**2 + yy**2)
        for pol in range(npol):
            xfr.data[chan, pol, ...] = tapered_disk(rr,
                                                    diameter / 2.0,
                                                    blockage=blockage / 2.0,
                                                    edge=edge,
                                                    taper=taper)

        if pointingcentre is not None:
            # Correct for pointing centre
            pcx, pcy = pointingcentre.to_pixel(padded_beam.wcs, origin=0)
            pxx, pyy = numpy.meshgrid((range(pnx) - cx), (range(pny) - cy))
            phase = 2 * numpy.pi * ((pcx - cx) * pxx / float(pnx) +
                                    (pcy - cy) * pyy / float(pny))
            for pol in range(npol):
                xfr.data[chan, pol, ...] *= numpy.exp(1j * phase)

        if isinstance(zernikes, collections.abc.Iterable):
            try:
                import aotools
            except ModuleNotFoundError:
                raise ModuleNotFoundError("aotools is not installed")

            ndisk = numpy.ceil(numpy.abs(diameter / scalex)).astype('int')[0]
            ndisk = 2 * ((ndisk + 1) // 2)
            phase = numpy.zeros([ndisk, ndisk])
            for zernike in zernikes:
                # Accumulate the contribution of each Zernike term
                phase += zernike[
                    'coeff'] * aotools.functions.zernike.zernike_noll(
                        zernike['noll'], ndisk)

            # import matplotlib.pyplot as plt
            # plt.clf()
            # plt.imshow(phase)
            # plt.colorbar()
            # plt.show()
            #
            blc = pnx // 2 - ndisk // 2
            trc = pnx // 2 + ndisk // 2
            for pol in range(npol):
                xfr.data[chan, pol, blc:trc,
                         blc:trc] = xfr.data[chan, pol, blc:trc,
                                             blc:trc] * numpy.exp(1j * phase)

    padded_beam = fft_image(xfr, padded_beam)

    # Undo padding
    beam = create_empty_image_like(model)
    beam.data = padded_beam.data[...,
                                 (pny // 2 - ny // 2):(pny // 2 + ny // 2),
                                 (pnx // 2 - nx // 2):(pnx // 2 + nx // 2)]
    for chan in range(nchan):
        beam.data[chan, ...] /= numpy.max(numpy.abs(beam.data[chan, ...]))

    set_pb_header(beam, use_local=use_local)
    return beam
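The numerical core is: fill a padded aperture plane, Fourier transform it to get the complex voltage pattern, then crop back to the original image size and normalise. A rough numpy-only sketch of that pad/FFT/crop pattern (illustrative sizes only, not the RASCIL image classes or FFT conventions):

import numpy

ny = nx = 64
padding = 4
pny, pnx = padding * ny, padding * nx

yy, xx = numpy.mgrid[0:pny, 0:pnx]
rr = numpy.hypot(yy - pny // 2, xx - pnx // 2)
aperture = (rr < pnx // 16).astype(complex)       # unblocked, untapered dish

vp = numpy.fft.fftshift(numpy.fft.fft2(numpy.fft.ifftshift(aperture)))

vp = vp[(pny // 2 - ny // 2):(pny // 2 + ny // 2),
        (pnx // 2 - nx // 2):(pnx // 2 + nx // 2)]  # undo the padding
vp /= numpy.max(numpy.abs(vp))                      # peak-normalise, as above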
def create_awterm_convolutionfunction(im,
                                      make_pb=None,
                                      nw=1,
                                      wstep=1e15,
                                      oversampling=9,
                                      support=8,
                                      use_aaf=True,
                                      maxsupport=512,
                                      pa=None,
                                      normalise=True):
    """ Fill AW projection kernel into a GridData.

    :param im: Image template
    :param make_pb: Function to make the primary beam model image (hint: use a partial)
    :param nw: Number of w planes
    :param wstep: Step in w (wavelengths)
    :param oversampling: Oversampling of the convolution function in uv space
    :return: griddata correction Image, griddata kernel as GridData
    """
    if oversampling % 2 == 0:
        oversampling += 1
        log.info("Setting oversampling to next greatest odd number {}".format(
            oversampling))

    d2r = numpy.pi / 180.0

    # We only need the griddata correction function for the PSWF so we make
    # it for the shape of the image
    nchan, npol, ony, onx = im.data.shape

    assert isinstance(im, Image)
    # Calculate the template convolution kernel.
    cf = create_convolutionfunction_from_image(im,
                                               oversampling=oversampling,
                                               support=support)

    cf_shape = list(cf.data.shape)
    assert nw > 0, "Number of w planes must be greater than zero"
    cf_shape[2] = nw
    cf.data = numpy.zeros(cf_shape).astype('complex')

    cf.grid_wcs.wcs.crpix[4] = nw // 2 + 1.0
    cf.grid_wcs.wcs.cdelt[4] = wstep
    cf.grid_wcs.wcs.ctype[4] = 'WW'
    if numpy.abs(wstep) > 0.0:
        w_list = cf.grid_wcs.sub([5]).wcs_pix2world(range(nw), 0)[0]
    else:
        w_list = [0.0]

    assert isinstance(oversampling, int)
    assert oversampling > 0

    nx = max(maxsupport, 2 * oversampling * support)
    ny = max(maxsupport, 2 * oversampling * support)

    qnx = nx // oversampling
    qny = ny // oversampling

    cf.data[...] = 0.0

    subim = copy_image(im)
    ccell = onx * numpy.abs(d2r * subim.wcs.wcs.cdelt[0]) / qnx

    subim.data = numpy.zeros([nchan, npol, qny, qnx])
    subim.wcs.wcs.cdelt[0] = -ccell / d2r
    subim.wcs.wcs.cdelt[1] = +ccell / d2r
    subim.wcs.wcs.crpix[0] = qnx // 2 + 1.0
    subim.wcs.wcs.crpix[1] = qny // 2 + 1.0

    if use_aaf:
        this_pswf_gcf, _ = create_pswf_convolutionfunction(subim,
                                                           oversampling=1,
                                                           support=6)
        norm = 1.0 / this_pswf_gcf.data
    else:
        norm = 1.0

    if make_pb is not None:
        pb = make_pb(subim)

        if pa is not None:
            rpb = convert_azelvp_to_radec(pb, subim, pa)
        else:
            rpb = convert_azelvp_to_radec(pb, subim, 0.0)

        norm *= rpb.data

    # We might need to work with a larger image
    padded_shape = [nchan, npol, ny, nx]
    thisplane = copy_image(subim)
    thisplane.data = numpy.zeros(thisplane.shape, dtype='complex')
    for z, w in enumerate(w_list):
        thisplane.data[...] = 0.0 + 0.0j
        thisplane = create_w_term_like(thisplane, w, dopol=True)
        thisplane.data *= norm
        paddedplane = pad_image(thisplane, padded_shape)
        paddedplane = fft_image(paddedplane)

        ycen, xcen = ny // 2, nx // 2
        for y in range(oversampling):
            ybeg = y + ycen + (support * oversampling) // 2 - oversampling // 2
            yend = y + ycen - (support * oversampling) // 2 - oversampling // 2
            # vv = range(ybeg, yend, -oversampling)
            for x in range(oversampling):
                xbeg = x + xcen + (support *
                                   oversampling) // 2 - oversampling // 2
                xend = x + xcen - (support *
                                   oversampling) // 2 - oversampling // 2

                # uu = range(xbeg, xend, -oversampling)
                cf.data[..., z, y,
                        x, :, :] = paddedplane.data[...,
                                                    ybeg:yend:-oversampling,
                                                    xbeg:xend:-oversampling]
                # for chan in range(nchan):
                #     for pol in range(npol):
                #         cf.data[chan, pol, z, y, x, :, :] = paddedplane.data[chan, pol, :, :][vv, :][:, uu]

    if normalise:
        norm = numpy.zeros([nchan, npol, oversampling, oversampling])
        for y in range(oversampling):
            for x in range(oversampling):
                # uu = range(xbeg, xend, -oversampling)
                norm[..., y, x] = numpy.sum(numpy.real(cf.data[:, :, 0, y,
                                                               x, :, :]),
                                            axis=(-2, -1))
        for z, _ in enumerate(w_list):
            for y in range(oversampling):
                for x in range(oversampling):
                    cf.data[:, :, z, y, x] /= norm[..., y,
                                                   x][..., numpy.newaxis,
                                                      numpy.newaxis]
    cf.data = numpy.conjugate(cf.data)

    if use_aaf:
        pswf_gcf, _ = create_pswf_convolutionfunction(im,
                                                      oversampling=1,
                                                      support=6)
    else:
        pswf_gcf = create_empty_image_like(im)
        pswf_gcf.data[...] = 1.0

    return pswf_gcf, cf
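The w planes are spaced by wstep and centred on w = 0 through crpix = nw // 2 + 1. The equivalent linear mapping, checked in plain numpy for nw=5 and wstep=100 wavelengths:

import numpy

nw, wstep = 5, 100.0
crpix = nw // 2 + 1.0                          # 1-based reference pixel
w_list = (numpy.arange(nw) + 1.0 - crpix) * wstep
print(w_list)                                  # [-200. -100.    0.  100.  200.]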
Example #25
def grid_gaintable_to_screen(vis,
                             gaintables,
                             screen,
                             height=3e5,
                             gaintable_slices=None,
                             scale=1.0,
                             r0=5e3,
                             type_atmosphere='ionosphere',
                             vis_slices=None,
                             **kwargs):
    """ Grid a gaintable to a screen image

    Screen axes are ['XX', 'YY', 'TIME', 'FREQ']

    The phases are just averaged per grid cell, no phase unwrapping is performed.

    :param vis:
    :param gaintables: input gaintables
    :param screen:
    :param height: Height (in m) of screen above telescope e.g. 3e5
    :param r0: r0 in meters
    :param type_atmosphere: 'ionosphere' or 'troposphere'
    :param scale: Multiply the screen by this factor
    :return: gridded screen image, weights image
    """
    assert isinstance(vis, BlockVisibility)

    station_locations = vis.configuration.xyz

    nant = station_locations.shape[0]
    t2r = numpy.pi / 43200.0

    newscreen = create_empty_image_like(screen)
    weights = create_empty_image_like(screen)
    nchan, ntimes, ny, nx = screen.shape

    number_no_weight = 0

    for gaintable in gaintables:
        ha_zero = numpy.average(calculate_blockvisibility_hourangles(vis))
        for iha, rows in enumerate(
                vis_timeslice_iter(vis, vis_slices=vis_slices)):
            v = create_visibility_from_rows(vis, rows)
            ha = numpy.average(
                calculate_blockvisibility_hourangles(v) -
                ha_zero).to('rad').value
            pp = find_pierce_points(station_locations,
                                    (gaintable.phasecentre.ra.rad + t2r * ha) *
                                    units.rad,
                                    gaintable.phasecentre.dec,
                                    height=height,
                                    phasecentre=vis.phasecentre)

            scr = numpy.angle(gaintable.gain[0, :, 0, 0, 0])
            wt = gaintable.weight[0, :, 0, 0, 0]
            for ant in range(nant):
                pp0 = pp[ant][0:2]
                for freq in vis.frequency:
                    scale = numpy.power(r0 / 5000.0, -5.0 / 3.0)
                    if type_atmosphere == 'troposphere':
                        # In troposphere files, the units are phase in radians.
                        screen_to_phase = scale
                    else:
                        # In the ionosphere file, the units are dTEC.
                        screen_to_phase = -scale * 8.44797245e9 / freq
                    worldloc = [pp0[0], pp0[1], ha, freq]
                    pixloc = newscreen.wcs.wcs_world2pix([worldloc],
                                                         0)[0].astype('int')
                    assert pixloc[0] >= 0
                    assert pixloc[0] < nx
                    assert pixloc[1] >= 0
                    assert pixloc[1] < ny
                    pixloc[3] = 0
                    newscreen.data[
                        pixloc[3], pixloc[2], pixloc[1],
                        pixloc[0]] += wt[ant] * scr[ant] / screen_to_phase
                    weights.data[pixloc[3], pixloc[2], pixloc[1],
                                 pixloc[0]] += wt[ant]
                    if wt[ant] == 0.0:
                        number_no_weight += 1
    if number_no_weight > 0:
        log.warning(
            "grid_gaintable_to_screen: %d pierce points are have no weight" %
            (number_no_weight))

    assert numpy.max(weights.data) > 0.0, "No points were gridded"

    newscreen.data[weights.data > 0.0] = newscreen.data[
        weights.data > 0.0] / weights.data[weights.data > 0.0]

    return newscreen, weights
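For the ionospheric case the gridded screen is in dTEC, and with the default r0 = 5 km (scale factor 1) the conversion used above is phase = -8.44797245e9 * dTEC / frequency. A quick numeric check at 100 MHz:

freq = 1.0e8                          # Hz
dtec = 0.01                           # change in TEC units
phase = -8.44797245e9 * dtec / freq
print(phase)                          # about -0.845 radians for 0.01 dTEC at 100 MHz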
    def test_mpccal_MPCCAL_manysources_subimages(self):

        self.actualSetup()

        model = create_empty_image_like(self.theta_list[0].image)

        if rsexecute.using_dask:
            progress = None
        else:
            progress = self.progress

        future_vis = rsexecute.scatter(self.all_skymodel_noniso_vis)
        future_model = rsexecute.scatter(model)
        future_theta_list = rsexecute.scatter(self.theta_list)
        result = mpccal_skymodel_list_rsexecute_workflow(
            future_vis,
            future_model,
            future_theta_list,
            mpccal_progress=progress,
            nmajor=5,
            context='2d',
            algorithm='hogbom',
            scales=[0, 3, 10],
            fractional_threshold=0.3,
            threshold=0.2,
            gain=0.1,
            niter=1000,
            psf_support=256,
            deconvolve_facets=8,
            deconvolve_overlap=8,
            deconvolve_taper='tukey')

        (self.theta_list, residual) = rsexecute.compute(result, sync=True)

        combined_model = calculate_skymodel_equivalent_image(self.theta_list)

        psf_obs = invert_list_rsexecute_workflow(
            [self.all_skymodel_noniso_vis], [model], context='2d', dopsf=True)
        result = restore_list_rsexecute_workflow([combined_model], psf_obs,
                                                 [(residual, 0.0)])
        result = rsexecute.compute(result, sync=True)

        if self.persist:
            export_image_to_fits(
                residual,
                rascil_path('test_results/test_mpccal_no_edge_residual.fits'))
        if self.persist:
            export_image_to_fits(
                result[0],
                rascil_path('test_results/test_mpccal_no_edge_restored.fits'))
        if self.persist:
            export_image_to_fits(
                combined_model,
                rascil_path(
                    'test_results/test_mpccal_no_edge_deconvolved.fits'))

        recovered_mpccal_components = find_skycomponents(result[0],
                                                         fwhm=2,
                                                         threshold=0.32,
                                                         npixels=12)

        def max_flux(elem):
            return numpy.max(elem.flux)

        recovered_mpccal_components = sorted(recovered_mpccal_components,
                                             key=max_flux,
                                             reverse=True)

        assert recovered_mpccal_components[
            0].name == 'Segment 8', recovered_mpccal_components[0].name
        assert numpy.abs(recovered_mpccal_components[0].flux[0, 0] - 7.773751416364857) < 1e-7, \
            recovered_mpccal_components[0].flux[0, 0]

        newscreen = create_empty_image_like(self.screen)
        gaintables = [th.gaintable for th in self.theta_list]
        newscreen, weights = grid_gaintable_to_screen(
            self.all_skymodel_noniso_blockvis, gaintables, newscreen)
        if self.persist:
            export_image_to_fits(
                newscreen,
                rascil_path('test_results/test_mpccal_no_edge_screen.fits'))
        if self.persist:
            export_image_to_fits(
                weights,
                rascil_path(
                    'test_results/test_mpccal_no_edge_screenweights.fits'))

        rsexecute.close()