Example 1
 def test_create_image_from_array(self):
     m31model_by_array = create_image_from_array(self.m31image.data, wcs=None)
     m31model_by_array = create_image_from_array(self.m31image.data, self.m31image.wcs)
     add_image(self.m31image, m31model_by_array)
     add_image(self.m31image, m31model_by_array, docheckwcs=True)
     assert m31model_by_array.shape == self.m31image.shape
     log.debug(export_image_to_fits(self.m31image, fitsfile='%s/test_model.fits' % (self.dir)))
     log.debug(qa_image(m31model_by_array, context='test_create_from_image'))
def invert_2d_from_grid(uv_grid_vis,
                        sumwt,
                        im: Image,
                        normalize: bool = True,
                        **kwargs):
    """
    Perform the 2d fourier transform on the gridded uv visibility
    
    :param uv_grid_vis: the gridded uv visibility
    :param sumwt: the weight for the uv visibility
    :param im: image template (not changed)
    :param return: resulting image[nchan, npol, ny, nx], sum of weights[nchan, npol]
    """

    ### Calculate gcf -> grid correction function ###
    # 2D Prolate spheroidal angular function is separable
    npixel = get_parameter(kwargs, "npixel", 512)

    nx = npixel
    ny = npixel

    nu = numpy.abs(2.0 * (numpy.arange(nx) - nx // 2) / nx)
    gcf1d, _ = grdsf(nu)
    gcf = numpy.outer(gcf1d, gcf1d)
    gcf[gcf > 0.0] = gcf.max() / gcf[gcf > 0.0]

    result = numpy.real(ifft(uv_grid_vis)) * gcf
    #Create image array

    resultimage = create_image_from_array(result, im.wcs)
    if normalize:
        resultimage = normalize_sumwt(resultimage, sumwt)

    return resultimage, sumwt
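
A minimal usage sketch for invert_2d_from_grid; uvgrid, weights and template are placeholder names, and npixel is read from kwargs via get_parameter as in the body above:

# Hedged sketch: npixel should match the grid size used when gridding the visibilities.
dirty, weights = invert_2d_from_grid(uvgrid, weights, template, normalize=True, npixel=512)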
Example 3
    def test_w_kernel_list(self):
        oversampling = 4
        kernelwidth = 128
        kernel_indices, kernels = w_kernel_list(self.vis,
                                                self.model,
                                                kernelwidth=kernelwidth,
                                                wstep=50,
                                                oversampling=oversampling)
        assert numpy.max(numpy.abs(kernels[0].data)) > 0.0
        assert len(kernel_indices) > 0
        assert max(kernel_indices) == len(kernels) - 1
        assert type(kernels[0]) == numpy.ndarray
        assert len(kernels[0].shape) == 4
        assert kernels[0].shape == (oversampling, oversampling, kernelwidth, kernelwidth), \
            "Actual shape is %s" % str(kernels[0].shape)
        kernel0 = create_image_from_array(kernels[0], self.model.wcs)
        kernel0.data = kernel0.data.real
        export_image_to_fits(kernel0,
                             "%s/test_w_kernel_list_kernel0.fits" % (self.dir))

        with self.assertRaises(AssertionError):
            kernel_indices, kernels = w_kernel_list(self.vis,
                                                    self.model,
                                                    kernelwidth=32,
                                                    wstep=50,
                                                    oversampling=3,
                                                    maxsupport=128)
Example 4
def reppre_ifft_kernel(ix, data_extract_lsm, dep_image, insert_method,
                       applybeam):
    frequency = dep_image["frequency"]
    comps = []
    for i in data_extract_lsm.value:
        if i[0][2] == ix[2]:
            comps = i[1]

    wcs = dep_image["wcs"]
    shape = dep_image["shape"]
    polarisation_frame = dep_image["polarisation_frame"]
    model = create_image_from_array(np.zeros(shape),
                                    wcs,
                                    polarisation_frame=polarisation_frame)

    model = insert_skycomponent(model, comps, insert_method=insert_method)

    if applybeam:
        beam = create_low_test_beam(model)
        model.data = model.data * beam.data

    result = model
    label = "Reprojection Predict + IFFT (14645.6 MB, 2.56 Tflop) "
    # print(label + str(result))
    return result
Example 5
 def setUp(self):
     self.dir = './test_results'
     os.makedirs(self.dir, exist_ok=True)
     self.lowcore = create_named_configuration('LOWBD2-CORE')
     self.times = (numpy.pi / (12.0)) * numpy.linspace(-3.0, 3.0, 7)
     self.frequency = numpy.array([1e8])
     self.channel_bandwidth = numpy.array([1e6])
     self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox=2000.0)
     self.vis = create_visibility(self.lowcore, self.times, self.frequency,
                                  channel_bandwidth=self.channel_bandwidth,
                                  phasecentre=self.phasecentre, weight=1.0,
                                  polarisation_frame=PolarisationFrame('stokesI'))
     self.vis.data['vis'] *= 0.0
     
     # Create model
     self.test_model = create_test_image(cellsize=0.001, phasecentre=self.vis.phasecentre,
                                         frequency=self.frequency)
     self.vis = predict_2d(self.vis, self.test_model)
     assert numpy.max(numpy.abs(self.vis.vis)) > 0.0
     self.model = create_image_from_visibility(self.vis, npixel=512, cellsize=0.001,
                                               polarisation_frame=PolarisationFrame('stokesI'))
     self.dirty, sumwt = invert_2d(self.vis, self.model)
     self.psf, sumwt = invert_2d(self.vis, self.model, dopsf=True)
     window = numpy.zeros(shape=self.model.shape, dtype=bool)
     window[..., 129:384, 129:384] = True
     self.innerquarter = create_image_from_array(window, self.model.wcs)
Example 6
File: bags.py Project: Jxt1/arlo
def sum_invert_bag_results(invert_list, normalize=True):
    """Sum a list of invert results, optionally normalizing at the end

    Can be used in bag.map()
    
    :param invert_list: List of results from invert: Image, weight tuples
    :param normalize: Normalize by the sum of weights
    """
    assert isinstance(invert_list, collections.abc.Iterable), invert_list
    result = None
    weight = None
    for i, a in enumerate(invert_list):
        assert isinstance(a[0], Image), "Item is not an image: %s" % str(a[0])
        if i == 0:
            result = create_image_from_array(a[0].data * a[1], a[0].wcs, a[0].polarisation_frame)
            weight = a[1]
        else:
            result.data += a[0].data * a[1]
            weight += a[1]
    
    assert weight is not None and result is not None, "No valid images found"
    
    if normalize:
        result = normalize_sumwt(result, weight)
    
    return result, weight
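
A hedged sketch of calling the summation directly on a plain list of (Image, sum-of-weights) tuples, as the docstring allows; dirty1, dirty2, wt1 and wt2 are illustrative names:

# Each element is an (Image, sumwt) pair such as invert_2d returns.
combined, total_weight = sum_invert_bag_results([(dirty1, wt1), (dirty2, wt2)], normalize=True)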
Example 7
def image_gather_channels(image_list: List[Image],
                          im: Image = None,
                          subimages=1) -> Image:
    """Gather a list of subimages back into an image using the channel_iterator
    
    If the template image is not given then ti will be formed assuming that the list has
    been generated by image_scatter_channels with subimages = number of channels

    :param image_list: List of subimages
    :param im: Output image
    :param facets: Number of image partitions on each axis (2)
    :return: list of subimages
    """

    if im is None:
        nchan = len(image_list)
        _, npol, ny, nx = image_list[0].shape
        im_shape = nchan, npol, ny, nx
        im = create_image_from_array(
            numpy.zeros(im_shape, dtype=image_list[0].data.dtype),
            image_list[0].wcs, image_list[0].polarisation_frame)

    for i, slab in enumerate(image_channel_iter(im, subimages=subimages)):
        slab.data[...] = image_list[i].data[...]

    return im
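
A minimal round-trip sketch, assuming cube is an existing multi-channel Image; image_channel_iter (shown later in this listing) supplies the per-channel pieces:

# Split a cube into single-channel images, then gather them back into a new cube.
nchan = cube.shape[0]
channel_images = list(image_channel_iter(cube, subimages=nchan))
rebuilt = image_gather_channels(channel_images, im=None, subimages=nchan)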
Example 8
 def setUp(self):
     self.dir = './test_results'
     os.makedirs(self.dir, exist_ok=True)
     self.niter = 1000
     self.lowcore = create_named_configuration('LOWBD2-CORE')
     self.nchan = 5
     self.times = (numpy.pi / 12.0) * numpy.linspace(-3.0, 3.0, 7)
     self.frequency = numpy.linspace(0.9e8, 1.1e8, self.nchan)
     self.channel_bandwidth = numpy.array(self.nchan * [self.frequency[1] - self.frequency[0]])
     self.phasecentre = SkyCoord(ra=+0.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
     self.vis = create_visibility(self.lowcore, self.times, self.frequency, self.channel_bandwidth,
                                  phasecentre=self.phasecentre, weight=1.0,
                                  polarisation_frame=PolarisationFrame('stokesI'))
     self.vis.data['vis'] *= 0.0
     
     # Create model
     self.test_model = create_low_test_image_from_gleam(npixel=512, cellsize=0.001,
                                                        phasecentre=self.vis.phasecentre,
                                                        frequency=self.frequency,
                                                        channel_bandwidth=self.channel_bandwidth)
     beam = create_low_test_beam(self.test_model)
     export_image_to_fits(beam, "%s/test_deconvolve_msmfsclean_beam.fits" % self.dir)
     self.test_model.data *= beam.data
     export_image_to_fits(self.test_model, "%s/test_deconvolve_msmfsclean_model.fits" % self.dir)
     self.vis = predict_2d(self.vis, self.test_model)
     assert numpy.max(numpy.abs(self.vis.vis)) > 0.0
     self.model = create_image_from_visibility(self.vis, npixel=512, cellsize=0.001,
                                               polarisation_frame=PolarisationFrame('stokesI'))
     self.dirty, sumwt = invert_2d(self.vis, self.model)
     self.psf, sumwt = invert_2d(self.vis, self.model, dopsf=True)
     export_image_to_fits(self.dirty, "%s/test_deconvolve_msmfsclean_dirty.fits" % self.dir)
     export_image_to_fits(self.psf, "%s/test_deconvolve_msmfsclean_psf.fits" % self.dir)
     window = numpy.ones(shape=self.model.shape, dtype=bool)
     window[..., 129:384, 129:384] = True
     self.innerquarter = create_image_from_array(window, self.model.wcs)
Example 9
def create_low_test_beam(model: Image) -> Image:
    """Create a test power beam for LOW using an image from OSKAR

    :param model: Template image
    :return: Image
    """

    beam = import_image_from_fits(arl_path('data/models/SKA1_LOW_beam.fits'))

    # Scale the image cellsize to account for the difference in frequencies. Eventually we will want to
    # use a frequency cube
    log.info("create_low_test_beam: primary beam is defined at %.3f MHz" %
             (beam.wcs.wcs.crval[2] * 1e-6))

    nchan, npol, ny, nx = model.shape

    # We need to interpolate each frequency channel separately. The beam is assumed to just scale with
    # frequency.

    reprojected_beam = create_empty_image_like(model)

    for chan in range(nchan):

        model2dwcs = model.wcs.sub(2).deepcopy()
        model2dshape = [model.shape[2], model.shape[3]]
        beam2dwcs = beam.wcs.sub(2).deepcopy()

        # The frequency axis is the second to last in the beam
        frequency = model.wcs.sub(['spectral']).wcs_pix2world([chan], 0)[0]
        fscale = beam.wcs.wcs.crval[2] / frequency

        beam2dwcs.wcs.cdelt = fscale * beam.wcs.sub(2).wcs.cdelt
        beam2dwcs.wcs.crpix = beam.wcs.sub(2).wcs.crpix
        beam2dwcs.wcs.crval = model.wcs.sub(2).wcs.crval
        beam2dwcs.wcs.ctype = model.wcs.sub(2).wcs.ctype
        model2dwcs.wcs.crpix = [
            model.shape[2] // 2 + 1, model.shape[3] // 2 + 1
        ]

        beam2d = create_image_from_array(beam.data[0, 0, :, :], beam2dwcs,
                                         model.polarisation_frame)
        reprojected_beam2d, footprint = reproject_image(beam2d,
                                                        model2dwcs,
                                                        shape=model2dshape)
        assert numpy.max(
            footprint.data) > 0.0, "No overlap between beam and model"

        reprojected_beam2d.data *= reprojected_beam2d.data
        reprojected_beam2d.data[footprint.data <= 0.0] = 0.0
        for pol in range(npol):
            reprojected_beam.data[chan,
                                  pol, :, :] = reprojected_beam2d.data[:, :]

    return reprojected_beam
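
As in the test fixtures elsewhere in this listing, the returned power beam is applied multiplicatively to a model image (model being an existing template Image):

# Apply the LOW primary beam to the model (element-wise multiply of the data arrays).
beam = create_low_test_beam(model)
model.data *= beam.data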
Example 10
def image_raster_iter(im: Image, **kwargs):
    """Create an image_raster_iter generator, returning images, optionally with overlaps

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved

    To update the image in place:
        for r in image_raster_iter(im, facets=2):
            r.data[...] = numpy.sqrt(r.data[...])

    :param im: Image
    :param facets: Number of image partitions on each axis (2)
    :param overlap: overlap in pixels
    :param kwargs: throw away unwanted parameters
    """

    nchan, npol, ny, nx = im.shape
    facets = get_parameter(kwargs, "facets", 1)
    overlap = get_parameter(kwargs, 'overlap', 0)
    log.debug("raster_overlap: predicting using %d x %d image partitions" %
              (facets, facets))
    assert facets <= ny, "Cannot have more raster elements than pixels"
    assert facets <= nx, "Cannot have more raster elements than pixels"

    sx = int((nx // facets))
    sy = int((ny // facets))
    dx = int((nx // facets) + 2 * overlap)
    dy = int((ny // facets) + 2 * overlap)

    log.debug('raster_overlap: spacing of raster (%d, %d)' % (dx, dy))

    for fy in range(facets):
        y = ny // 2 + sy * (fy - facets // 2) - overlap
        for fx in range(facets):
            x = nx // 2 + sx * (fx - facets // 2) - overlap
            if (x >= 0) and (x + dx) <= nx and (y >= 0) and (y + dy) <= ny:
                log.debug('raster_overlap: partition (%d, %d) of (%d, %d)' %
                          (fy, fx, facets, facets))
                # Adjust WCS
                wcs = im.wcs.deepcopy()
                wcs.wcs.crpix[0] -= x
                wcs.wcs.crpix[1] -= y
                # yield image from slice (reference!)
                yield create_image_from_array(im.data[..., y:y + dy, x:x + dx],
                                              wcs, im.polarisation_frame)
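
A usage sketch of the in-place update pattern from the docstring, assuming im is an existing Image; each yielded facet is a reference into the parent image's data:

# Update the image facet by facet; writes propagate back to im because the
# yielded images are views of im.data.
for facet in image_raster_iter(im, facets=2):
    facet.data[...] = numpy.sqrt(facet.data[...])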
Example 11
def gather_image_kernel(data, facets, image_iter, **kwargs):
    data_dict, image_metadata = data
    wcs, polarisation_frame, shape = image_metadata
    result = create_image_from_array(np.empty(shape),
                                     wcs=wcs,
                                     polarisation_frame=polarisation_frame)
    i = 0
    sumwt = np.zeros([shape[0], shape[1]])
    for dpatch in image_iter(result, facets=facets, **kwargs):
        assert i < len(
            data_dict), "Too few results in gather_image_iteration_results"
        if data_dict[i] is not None:
            assert len(data_dict[i]) == 2, data_dict[i]
            dpatch.data[...] = data_dict[i][0]
            sumwt += data_dict[i][1]
            i += 1
    return result, sumwt
Example 12
def create_deconvolve_graph(sc, dirty_graph, psf_graph, model_graph, **kwargs):
    def make_cube(img_graph, nchan, has_sumwt=True, ret_idx=False):
        imgs = img_graph.collect()
        l = [Image() for i in range(nchan)]
        idx = [() for i in range(nchan)]
        for tup in imgs:
            if has_sumwt:
                l[tup[0][2]] = tup[1][0]
            else:
                l[tup[0][2]] = tup[1]
            if ret_idx:
                idx[tup[0][2]] = tup[0]
        if ret_idx:
            return l, idx
        else:
            return l

    nchan = get_parameter(kwargs, "nchan", 1)
    algorithm = get_parameter(kwargs, "algorithm", "mmclean")
    if algorithm == "mmclean" and nchan > 1:
        im, idx = make_cube(dirty_graph, nchan, ret_idx=True)
        dirty_cube = image_gather_channels(im, subimages=nchan)
        psf_cube = image_gather_channels(make_cube(psf_graph, nchan),
                                         subimages=nchan)
        model_cube = image_gather_channels(make_cube(model_graph,
                                                     nchan,
                                                     has_sumwt=False),
                                           subimages=nchan)
        result = deconvolve_cube(dirty_cube, psf_cube, **kwargs)
        result[0].data += model_cube.data
        ret_img = []
        pl = result[0].polarisation_frame
        nchan, npol, ny, nx = result[0].shape
        for i in range(nchan):
            wcs = result[0].wcs.deepcopy()
            wcs.wcs.crpix[3] -= i
            ret_img.append(
                create_image_from_array(
                    result[0].data[i, ...].reshape(1, npol, ny, nx), wcs, pl))

        return sc.parallelize([i for i in range(nchan)
                               ]).map(lambda ix: (idx[ix], ret_img[ix]))

    else:
        return deconvolve_handle(dirty_graph, psf_graph, model_graph, **kwargs)
Example 13
def gather_deconvolved_image_kernel(datas, facets, **kwargs):
    data, image_metadata = datas
    dic = {}
    for im in data:
        dic[im.facet_id] = im

    wcs, polarisation_frame, shape = image_metadata
    result = create_image_from_array(np.empty(shape),
                                     wcs=wcs,
                                     polarisation_frame=polarisation_frame)
    i = 0
    for dpatch in image_raster_iter(result, facets=facets, **kwargs):
        assert i < len(
            data), "Too few results in gather_image_iteration_results"
        dpatch.data[...] = dic[i].data[...]
        i += 1

    return result
def image_raster_iter(im: Image, **kwargs) -> Image:
    """Create an image_raster_iter generator, returning images

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved

    To update the image in place:
        for r in image_raster_iter(im, facets=2):
            r.data[...] = numpy.sqrt(r.data[...])

    :param im: Image
    :param facets: Number of image partitions on each axis (2)
    :param kwargs: throw away unwanted parameters
    """

    facets = get_parameter(kwargs, "facets", 1)
    log.debug("raster: predicting using %d x %d image partitions" %
              (facets, facets))
    assert facets <= im.nheight, "Cannot have more raster elements than pixels"
    assert facets <= im.nwidth, "Cannot have more raster elements than pixels"
    assert im.nheight % facets == 0, "The %d partitions must exactly fill the image %d" % (
        facets, im.nheight)
    assert im.nwidth % facets == 0, "The %d partitions must exactly fill the image %d" % (
        facets, im.nwidth)

    dx = int(im.nwidth // facets)
    dy = int(im.nheight // facets)
    log.debug('raster: spacing of raster (%d, %d)' % (dx, dy))

    for y in range(0, im.nheight, dy):
        for x in range(0, im.nwidth, dx):
            log.debug('raster: partition (%d, %d) of (%d, %d)' %
                      (x // dx, y // dy, facets, facets))

            # Adjust WCS
            wcs = im.wcs.deepcopy()
            wcs.wcs.crpix[0] -= x
            wcs.wcs.crpix[1] -= y

            # Yield image from slice (reference!)
            yield create_image_from_array(im.data[..., y:y + dy, x:x + dx],
                                          wcs, im.polarisation_frame)
def image_channel_iter(im: Image, subimages=1) -> Image:
    """Create a image_channel_iter generator, returning images

    The WCS is adjusted appropriately for each raster element. Hence this is a coordinate-aware
    way to iterate through an image.

    Provided we don't break reference semantics, memory should be conserved

    To update the image in place:
        for r in raster(im, facets=2)::
            r.data[...] = numpy.sqrt(r.data[...])

    :param im: Image
    :param channel_width: Number of image partitions on each axis (2)
    """

    nchan, npol, ny, nx = im.shape

    assert subimages <= nchan, "More subimages %d than channels %d" % (
        subimages, nchan)
    step = nchan // subimages
    channels = numpy.array(range(0, nchan, step), dtype='int')
    assert len(
        channels
    ) == subimages, "subimages %d does not match length of channels %d" % (
        subimages, len(channels))

    for i, channel in enumerate(channels):
        if i + 1 < len(channels):
            channel_max = channels[i + 1]
        else:
            channel_max = nchan

        # Adjust WCS
        wcs = im.wcs.deepcopy()
        wcs.wcs.crpix[3] -= channel

        # Yield image from slice (reference!)
        yield create_image_from_array(im.data[channel:channel_max, ...], wcs,
                                      im.polarisation_frame)
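
The same reference semantics hold for channel iteration, so a hedged sketch of zeroing an existing cube channel by channel is:

# subimages equal to the number of channels yields one channel at a time.
for channel_image in image_channel_iter(cube, subimages=cube.shape[0]):
    channel_image.data[...] = 0.0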
Example 16
def deconvolve_cube(dirty: Image, psf: Image, **kwargs):
    """ Clean using a variety of algorithms
    
    Functions that clean a dirty image using a point spread function. The algorithms available are:
    
    hogbom: Hogbom CLEAN See: Hogbom CLEAN A&A Suppl, 15, 417, (1974)
    
    msclean: MultiScale CLEAN See: Cornwell, T.J., Multiscale CLEAN (IEEE Journal of Selected Topics in Sig Proc,
    2008 vol. 2 pp. 793-801)

    mfsmsclean: MultiScale Multi-Frequency See: U. Rau and T. J. Cornwell, “A multi-scale multi-frequency
    deconvolution algorithm for synthesis imaging in radio interferometry,” A&A 532, A71 (2011).
    
    For example::
    
        comp, residual = deconvolve_cube(dirty, psf, niter=1000, gain=0.7, algorithm='msclean',
                                         scales=[0, 3, 10, 30], threshold=0.01)
    
    :param dirty: Image dirty image
    :param psf: Image Point Spread Function
    :param window: Window image (Bool) - clean where True
    :param algorithm: Cleaning algorithm: 'msclean'|'hogbom'|'mfsmsclean'
    :param gain: loop gain (float) 0.7
    :param threshold: Clean threshold (0.0)
    :param fractional_threshold: Fractional threshold (0.01)
    :param scales: Scales (in pixels) for multiscale ([0, 3, 10, 30])
    :param nmoments: Number of frequency moments (default 3)
    :param findpeak: Method of finding peak in mfsclean: 'Algorithm1'|'ASKAPSoft'|'CASA'|'ARL', Default is ARL.
    :returns: componentimage, residual
    
    """
    assert type(dirty) == Image, "Type is %s" % (type(dirty))
    assert type(psf) == Image, "Type is %s" % (type(psf))

    window = get_parameter(kwargs, 'window', None)
    if window == 'quarter':
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = numpy.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info('deconvolve_cube: Cleaning inner quarter of each sky plane')
    else:
        window = None

    psf_support = get_parameter(kwargs, 'psf_support', None)
    if isinstance(psf_support, int):
        if (psf_support < psf.shape[2] // 2) and (
            (psf_support < psf.shape[3] // 2)):
            centre = [psf.shape[2] // 2, psf.shape[3] // 2]
            psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] +
                                                                psf_support),
                                (centre[1] - psf_support):(centre[1] +
                                                           psf_support)]
            log.info('deconvolve_cube: PSF support = +/- %d pixels' %
                     (psf_support))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')

    if algorithm == 'msclean':
        log.info(
            "deconvolve_cube: Multi-scale clean of each polarisation and channel separately"
        )
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.01)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros_like(dirty.data)
        residual_array = numpy.zeros_like(dirty.data)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info("deconvolve_cube: Processing pol %d, channel %d" %
                             (pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    None, gain, thresh, niter, scales, fracthresh)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            msclean(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                    window[channel, pol, :, :], gain, thresh, niter, scales, fracthresh)
                else:
                    log.info("deconvolve_cube: Skipping pol %d, channel %d" %
                             (pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs)
        residual_image = create_image_from_array(residual_array, dirty.wcs)

    elif algorithm == 'msmfsclean' or algorithm == 'mfsmsclean':
        findpeak = get_parameter(kwargs, "findpeak", 'ARL')

        log.info(
            "deconvolve_cube: Multi-scale multi-frequency clean of each polarisation separately"
        )
        nmoments = get_parameter(kwargs, "nmoments", 3)
        assert nmoments > 0, "Number of frequency moments must be greater than zero"
        dirty_taylor = calculate_image_frequency_moments(dirty,
                                                         nmoments=nmoments)
        psf_taylor = calculate_image_frequency_moments(psf,
                                                       nmoments=2 * nmoments)

        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        scales = get_parameter(kwargs, 'scales', [0, 3, 10, 30])
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 < fracthresh < 1.0

        comp_array = numpy.zeros(dirty_taylor.data.shape)
        residual_array = numpy.zeros(dirty_taylor.data.shape)
        for pol in range(dirty_taylor.data.shape[1]):
            if psf_taylor.data[0, pol, :, :].max():
                log.info("deconvolve_cube: Processing pol %d" % (pol))
                if window is None:
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, pol, :, :],
                                None, gain, thresh, niter, scales, fracthresh, findpeak)
                else:
                    comp_array[:, pol, :, :], residual_array[:, pol, :, :] = \
                        msmfsclean(dirty_taylor.data[:, pol, :, :], psf_taylor.data[:, pol, :, :],
                                window[:, pol, :, :], gain, thresh, niter, scales, fracthresh, findpeak)
            else:
                log.info("deconvolve_cube: Skipping pol %d" % (pol))

        comp_image = create_image_from_array(comp_array, dirty_taylor.wcs)
        residual_image = create_image_from_array(residual_array,
                                                 dirty_taylor.wcs)

        return_moments = get_parameter(kwargs, "return_moments", False)
        if not return_moments:
            log.info("Deconvolve_cube: calculating spectral cubes")
            comp_image = calculate_image_from_frequency_moments(
                dirty, comp_image)
            residual_image = calculate_image_from_frequency_moments(
                dirty, residual_image)
        else:
            log.info("Deconvolve_cube: constructed moment cubes")

    elif algorithm == 'hogbom':
        log.info(
            "deconvolve_cube: Hogbom clean of each polarisation and channel separately"
        )
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0

        comp_array = numpy.zeros(dirty.data.shape)
        residual_array = numpy.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if psf.data[channel, pol, :, :].max():
                    log.info("deconvolve_cube: Processing pol %d, channel %d" %
                             (pol, channel))
                    if window is None:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   None, gain, thresh, niter, fracthresh)
                    else:
                        comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                            hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                   window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                else:
                    log.info("deconvolve_cube: Skipping pol %d, channel %d" %
                             (pol, channel))

        comp_image = create_image_from_array(comp_array, dirty.wcs)
        residual_image = create_image_from_array(residual_array, dirty.wcs)
    else:
        raise ValueError('deconvolve_cube: Unknown algorithm %s' % algorithm)

    return comp_image, residual_image
Example 17
def create_low_test_image_from_gleam(npixel=512,
                                     polarisation_frame=PolarisationFrame(
                                         "stokesI"),
                                     cellsize=0.000015,
                                     frequency=numpy.array([1e8]),
                                     channel_bandwidth=numpy.array([1e6]),
                                     phasecentre=None,
                                     kind='cubic',
                                     applybeam=False,
                                     flux_limit=0.1,
                                     radius=None,
                                     insert_method='Nearest',
                                     nchan=16) -> Image:
    """Create LOW test image from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.
    
    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.
    
    VIII/100   GaLactic and Extragalactic All-sky MWA survey  (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :return: Image
    
    """

    if phasecentre is None:
        phasecentre = SkyCoord(ra=+15.0 * u.deg,
                               dec=-35.0 * u.deg,
                               frame='icrs',
                               equinox='J2000')

    if radius is None:
        radius = npixel * cellsize / numpy.sqrt(2.0)

    sc = create_low_test_skycomponents_from_gleam(
        flux_limit=flux_limit,
        polarisation_frame=polarisation_frame,
        frequency=frequency,
        phasecentre=phasecentre,
        kind=kind,
        radius=radius,
        nchan=nchan)
    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")

    npol = polarisation_frame.npol
    nchan = len(frequency)
    shape = [nchan, npol, npixel, npixel]
    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [
        -cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0,
        channel_bandwidth[0]
    ]
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4
    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0

    model = create_image_from_array(numpy.zeros(shape),
                                    w,
                                    polarisation_frame=polarisation_frame)

    model = insert_skycomponent(model, sc, insert_method=insert_method)
    if applybeam:
        beam = create_low_test_beam(model)
        model.data[...] *= beam.data[...]

    log.info(qa_image(model, context='create_low_test_image_from_gleam'))

    return model
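
A usage sketch mirroring the test fixtures earlier in this listing; vis is assumed to be an existing Visibility, and the single-channel frequency and channel_bandwidth arrays are illustrative:

# Build a GLEAM-based LOW test image centred on the visibility phase centre.
model = create_low_test_image_from_gleam(npixel=512, cellsize=0.001,
                                         frequency=numpy.array([1e8]),
                                         channel_bandwidth=numpy.array([1e6]),
                                         phasecentre=vis.phasecentre,
                                         flux_limit=0.1)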
Example 18
def invert_2d_base_timing(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray, tuple):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image

    """
    opt = get_parameter(kwargs, 'opt', False)
    if not opt:
        log.debug('Using original algorithm')
    else:
        log.debug('Using optimized algorithm')

    if not isinstance(vis, Visibility):
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    padding = {}
    if get_parameter(kwargs, "padding", False):
        padding = {'padding': get_parameter(kwargs, "padding", False)}
    spectral_mode, vfrequencymap = get_frequency_map(svis, im, opt)
    polarisation_mode, vpolarisationmap = get_polarisation_map(svis, im)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **padding)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol,
         int(round(padding * ny)),
         int(round(padding * nx))],
        dtype='complex')

    # Use original algorithm
    if not opt:
        time_grid = -time.time()
        imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                              svis.data['vis'],
                                              svis.data['imaging_weight'],
                                              vuvwmap, vfrequencymap,
                                              vpolarisationmap)
        time_grid += time.time()
    # Use optimized algorithm
    else:
        time_grid = -time.time()
        kernel_indices, kernels = vkernellist
        ks0, ks1, ks2, ks3 = kernels[0].shape
        kernels_c = numpy.zeros((len(kernels), ks0, ks1, ks2, ks3),
                                dtype=kernels[0].dtype)
        for i in range(len(kernels)):
            kernels_c[i, ...] = kernels[i]

        vfrequencymap_c = numpy.array(vfrequencymap, dtype=numpy.int32)
        sumwt = numpy.zeros((imgridpad.shape[0], imgridpad.shape[1]),
                            dtype=numpy.float64)

        convolutional_grid_c(imgridpad, sumwt, native_order(svis.data['vis']),
                             native_order(svis.data['imaging_weight']),
                             native_order(kernels_c),
                             native_order(kernel_indices),
                             native_order(vuvwmap),
                             native_order(vfrequencymap_c))
        time_grid += time.time()

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        log.debug("invert_2d_base: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs)
        resultimag = create_image_from_array(result.imag, im.wcs)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        # Use original algorithm
        if not opt:
            time_ifft = -time.time()
            inarr = ifft(imgridpad)
            time_ifft += time.time()

        # Use optimized algorithm
        else:
            time_ifft = -time.time()
            inarr = numpy.zeros(imgridpad.shape, dtype=imgridpad.dtype)
            ifft_c(inarr, imgridpad)
            time_ifft += time.time()

        result = extract_mid(numpy.real(inarr) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt, (time_grid, time_ifft)
Example 19
def create_low_test_image_from_gleam(npixel=512, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
                                     frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
                                     phasecentre=None, kind='cubic'):
    """Create LOW test image from the GLEAM survey

    Stokes I is estimated from a cubic spline fit to the measured fluxes. The polarised flux is always zero.
    
    See http://www.mwatelescope.org/science/gleam-survey The catalog is available from Vizier.
    
    VIII/100   GaLactic and Extragalactic All-sky MWA survey  (Hurley-Walker+, 2016)

    GaLactic and Extragalactic All-sky Murchison Wide Field Array (GLEAM) survey. I: A low-frequency extragalactic
    catalogue. Hurley-Walker N., et al., Mon. Not. R. Astron. Soc., 464, 1146-1167 (2017), 2017MNRAS.464.1146H

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param kind: Kind of interpolation (see scipy.interpolate.interp1d) Default: cubic
    :returns: Image
    
    """
    
    fitsfile = arl_path("data/models/GLEAM_EGC.fits")
    
    hdulist = fits.open(fitsfile, lazy_load_hdus=False)
    recs = hdulist[1].data[0].array
    ras = recs['RAJ2000']
    decs = recs['DEJ2000']
    
    if phasecentre is None:
        phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox=2000.0)
    
    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("stokesI")
    
    npol = polarisation_frame.npol
    
    nchan = len(frequency)
    
    shape = [nchan, npol, npixel, npixel]
    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
    w.wcs.crpix = [npixel // 2, npixel // 2, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4
    
    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0
    
    model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)
    
    p = w.sub(2).wcs_world2pix(numpy.array(ras), numpy.array(decs), 1)
    
    p = numpy.array(p)
    mask = numpy.isfinite(p[0, :])
    p = [p[0, mask], p[1, mask]]
    
    ip = numpy.round(p).astype('int')
    ok = numpy.where((0 <= ip[0, :]) & (npixel > ip[0, :]) & (0 <= ip[1, :]) & (npixel > ip[1, :]))[0]
    ps = ip[:, ok]
    
    # For every source, we read all measured fluxes and interpolate to the
    # required frequencies
    fluxes = []
    gleam_freqs = numpy.array([76, 84, 92, 99, 107, 115, 122, 130, 143, 151, 158, 166, 174, 181, 189, 197, 204,
                               212, 220, 227])
    for source in ok:
        this_source_fluxes = numpy.zeros(len(gleam_freqs))
        
        for i, f in enumerate(gleam_freqs):
            this_source_fluxes[i] = recs['int_flux_%03d' % (f)][source]
        
        fint = interpolate.interp1d(gleam_freqs * 1.0e6, this_source_fluxes, kind=kind)
        
        fluxes.append(fint(frequency))
    
    fluxes = numpy.array(fluxes)
    actual_flux = numpy.sum(fluxes)
    
    log.info('create_low_test_image_from_gleam: %d sources inside the image' % (ps.shape[1]))
    
    log.info('create_low_test_image_from_gleam: Average flux per channel in image = %.3f' % (actual_flux/float(nchan)))
    for iflux, flux in enumerate(fluxes):
        if not numpy.isnan(flux).any() and numpy.all(flux > 0.0):
            model.data[:, 0, ps[1, iflux], ps[0, iflux]] = flux[:]
            
    hdulist.close()
    
    return model
Example 20
def invert_2d_base(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, **kwargs) \
        -> (Image, numpy.ndarray):
    """ Invert using 2D convolution function, including w projection optionally

    Use the image im as a template. Do PSF in a separate call.

    This is at the bottom of the layering i.e. all transforms are eventually expressed in terms
    of this function. Any shifting needed is performed here.

    :param vis: Visibility to be inverted
    :param im: image template (not changed)
    :param dopsf: Make the psf instead of the dirty image
    :param normalize: Normalize by the sum of weights (True)
    :return: resulting image

    """
    if type(vis) is not Visibility:
        svis = coalesce_visibility(vis, **kwargs)
    else:
        svis = copy_visibility(vis)

    if dopsf:
        svis.data['vis'] = numpy.ones_like(svis.data['vis'])

    svis = shift_vis_to_image(svis, im, tangent=True, inverse=False)

    nchan, npol, ny, nx = im.data.shape

    spectral_mode, vfrequencymap = get_frequency_map(svis, im)
    polarisation_mode, vpolarisationmap = get_polarisation_map(
        svis, im, **kwargs)
    uvw_mode, shape, padding, vuvwmap = get_uvw_map(svis, im, **kwargs)
    kernel_name, gcf, vkernellist = get_kernel_list(svis, im, **kwargs)

    # Optionally pad to control aliasing
    imgridpad = numpy.zeros(
        [nchan, npol,
         int(round(padding * ny)),
         int(round(padding * nx))],
        dtype='complex')
    imgridpad, sumwt = convolutional_grid(vkernellist, imgridpad,
                                          svis.data['vis'],
                                          svis.data['imaging_weight'], vuvwmap,
                                          vfrequencymap, vpolarisationmap)

    # Fourier transform the padded grid to image, multiply by the gridding correction
    # function, and extract the unpadded inner part.

    # Normalise weights for consistency with transform
    sumwt /= float(padding * int(round(padding * nx)) * ny)

    imaginary = get_parameter(kwargs, "imaginary", False)
    if imaginary:
        log.debug("invert_2d_base: retaining imaginary part of dirty image")
        result = extract_mid(ifft(imgridpad) * gcf, npixel=nx)
        resultreal = create_image_from_array(result.real, im.wcs)
        resultimag = create_image_from_array(result.imag, im.wcs)
        if normalize:
            resultreal = normalize_sumwt(resultreal, sumwt)
            resultimag = normalize_sumwt(resultimag, sumwt)
        return resultreal, sumwt, resultimag
    else:
        result = extract_mid(numpy.real(ifft(imgridpad)) * gcf, npixel=nx)
        resultimage = create_image_from_array(result, im.wcs)
        if normalize:
            resultimage = normalize_sumwt(resultimage, sumwt)
        return resultimage, sumwt
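
A hedged sketch of the two return conventions of invert_2d_base; vis and model are assumed to have been prepared as in the setUp fixtures above:

# Standard call: dirty image plus sum of weights.
dirty, sumwt = invert_2d_base(vis, model, normalize=True)
# With imaginary=True the imaginary part of the dirty image is returned as well.
dirty_real, sumwt, dirty_imag = invert_2d_base(vis, model, imaginary=True)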
Example 21
def create_low_test_image_from_s3(npixel=16384,
                                  polarisation_frame=PolarisationFrame(
                                      "stokesI"),
                                  cellsize=0.000015,
                                  frequency=numpy.array([1e8]),
                                  channel_bandwidth=numpy.array([1e6]),
                                  phasecentre=None,
                                  fov=20) -> Image:
    """Create LOW test image from S3

    The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query::
        Database: s3_sex
        SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);;

    Number of rows returned: 29966

    There are three possible tables to use::

        data/models/S3_151MHz_10deg.csv, use fov=10
        data/models/S3_151MHz_20deg.csv, use fov=20
        data/models/S3_151MHz_40deg.csv, use fov=40

    The component spectral index is calculated from the 610MHz and 151MHz, and then calculated for the specified
    frequencies.

    If polarisation_frame is not stokesI then the image will have a polarised axis but the values will be zero.

    :param npixel: Number of pixels
    :param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
    :param cellsize: cellsize in radians
    :param frequency:
    :param channel_bandwidth: Channel width (Hz)
    :param phasecentre: phasecentre (SkyCoord)
    :param fov: fov table to use
    :return: Image
    """

    ras = []
    decs = []
    fluxes = []

    if phasecentre is None:
        phasecentre = SkyCoord(ra=+180.0 * u.deg,
                               dec=-60.0 * u.deg,
                               frame='icrs',
                               equinox='J2000')

    if polarisation_frame is None:
        polarisation_frame = PolarisationFrame("I")

    npol = polarisation_frame.npol

    nchan = len(frequency)

    shape = [nchan, npol, npixel, npixel]
    w = WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [
        -cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0,
        channel_bandwidth[0]
    ]
    w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
    w.naxis = 4

    w.wcs.radesys = 'ICRS'
    w.wcs.equinox = 2000.0

    model = create_image_from_array(numpy.zeros(shape),
                                    w,
                                    polarisation_frame=polarisation_frame)

    assert fov in [10, 20,
                   40], "Field of view invalid: use one of %s" % ([10, 20, 40])
    with open(arl_path('data/models/S3_151MHz_%ddeg.csv' % (fov))) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        r = 0
        for row in readCSV:
            # Skip first row
            if r > 0:
                ra = float(row[4]) + phasecentre.ra.deg
                dec = float(row[5]) + phasecentre.dec.deg
                alpha = (float(row[10]) - float(row[9])) / numpy.log10(
                    610.0 / 151.0)
                flux = numpy.power(10, float(row[9])) * numpy.power(
                    frequency / 1.51e8, alpha)
                ras.append(ra)
                decs.append(dec)
                fluxes.append(flux)
            r += 1

    csvfile.close()

    p = w.sub(2).wcs_world2pix(numpy.array(ras), numpy.array(decs), 1)
    total_flux = numpy.sum(fluxes)
    fluxes = numpy.array(fluxes)
    ip = numpy.round(p).astype('int')
    ok = numpy.where((0 <= ip[0, :]) & (npixel > ip[0, :]) & (0 <= ip[1, :])
                     & (npixel > ip[1, :]))[0]
    ps = ip[:, ok]
    fluxes = fluxes[ok]
    actual_flux = numpy.sum(fluxes)

    log.info('create_low_test_image_from_s3: %d sources inside the image' %
             (ps.shape[1]))

    log.info(
        'create_low_test_image_from_s3: average channel flux in S3 model = %.3f, actual average channel flux in '
        'image = %.3f' %
        (total_flux / float(nchan), actual_flux / float(nchan)))
    for chan in range(nchan):
        for iflux, flux in enumerate(fluxes):
            model.data[chan, 0, ps[1, iflux], ps[0, iflux]] = flux[chan]

    return model
Example 22
def create_image_from_visibility(vis, **kwargs) -> Image:
    """Make an empty image from params and Visibility

    :param vis:
    :param phasecentre: Phasecentre (Skycoord)
    :param channel_bandwidth: Channel width (Hz)
    :param cellsize: Cellsize (radians)
    :param npixel: Number of pixels on each axis (512)
    :param frame: Coordinate frame for WCS (ICRS)
    :param equinox: Equinox for WCS (2000.0)
    :param nchan: Number of image channels (Default is 1 -> MFS)
    :return: image
    """
    assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \
        "vis is not a Visibility or a BlockVisibility: %r" % (vis)

    log.info(
        "create_image_from_visibility: Parsing parameters to get definition of WCS"
    )

    imagecentre = get_parameter(kwargs, "imagecentre", vis.phasecentre)
    phasecentre = get_parameter(kwargs, "phasecentre", vis.phasecentre)

    # Spectral processing options
    ufrequency = numpy.unique(vis.frequency)
    vnchan = len(ufrequency)

    frequency = get_parameter(kwargs, "frequency", vis.frequency)
    inchan = get_parameter(kwargs, "nchan", vnchan)
    reffrequency = frequency[0] * units.Hz
    channel_bandwidth = get_parameter(kwargs, "channel_bandwidth",
                                      vis.channel_bandwidth[0]) * units.Hz

    if (inchan == vnchan) and vnchan > 1:
        log.info(
            "create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s"
            % (inchan, imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and vnchan > 1:
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.info(
            "create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, "
            "and bandwidth %s" %
            (imagecentre, reffrequency, channel_bandwidth))
    elif inchan > 1 and vnchan > 1:
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.info(
            "create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, "
            "and bandwidth %s" %
            (imagecentre, reffrequency, channel_bandwidth))
    elif (inchan == 1) and (vnchan == 1):
        assert numpy.abs(channel_bandwidth.value
                         ) > 0.0, "Channel width must be non-zero for mfs mode"
        log.info(
            "create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, "
            "and bandwidth %s" %
            (imagecentre, reffrequency, channel_bandwidth))
    else:
        raise ValueError(
            "create_image_from_visibility: unknown spectral mode ")

    # Image sampling options
    npixel = get_parameter(kwargs, "npixel", 512)
    uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1])))
    if isinstance(vis, BlockVisibility):
        uvmax *= numpy.max(frequency) / constants.c.to('m/s').value
    log.info("create_image_from_visibility: uvmax = %f wavelengths" % uvmax)
    criticalcellsize = 1.0 / (uvmax * 2.0)
    log.info(
        "create_image_from_visibility: Critical cellsize = %f radians, %f degrees"
        % (criticalcellsize, criticalcellsize * 180.0 / numpy.pi))
    cellsize = get_parameter(kwargs, "cellsize", 0.5 * criticalcellsize)
    log.info(
        "create_image_from_visibility: Cellsize          = %f radians, %f degrees"
        % (cellsize, cellsize * 180.0 / numpy.pi))
    override_cellsize = get_parameter(kwargs, "override_cellsize", True)
    if override_cellsize and cellsize > criticalcellsize:
        log.info(
            "create_image_from_visibility: Resetting cellsize %f radians to criticalcellsize %f radians"
            % (cellsize, criticalcellsize))
        cellsize = criticalcellsize
    pol_frame = get_parameter(kwargs, "polarisation_frame",
                              PolarisationFrame("stokesI"))
    inpol = pol_frame.npol

    # Now we can define the WCS, which is a convenient place to hold the info above
    # Beware of python indexing order! wcs and the array have opposite ordering
    shape = [inchan, inpol, npixel, npixel]
    w = wcs.WCS(naxis=4)
    # The negation in the longitude is needed by definition of RA, DEC
    w.wcs.cdelt = [
        -cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0,
        channel_bandwidth.to(units.Hz).value
    ]
    # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for
    # the reference pixel. We have to use 0 rel everywhere.
    w.wcs.crpix = [npixel // 2, npixel // 2, 1.0, 1.0]
    w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
    w.wcs.crval = [
        phasecentre.ra.deg, phasecentre.dec.deg, 1.0,
        reffrequency.to(units.Hz).value
    ]
    w.naxis = 4

    w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS')
    w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0)

    return create_image_from_array(numpy.zeros(shape),
                                   wcs=w,
                                   polarisation_frame=pol_frame)
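
A usage sketch matching the setUp fixtures earlier in this listing (vis is an existing Visibility):

# Derive an empty model image (WCS, shape and polarisation) from a Visibility.
model = create_image_from_visibility(vis, npixel=512, cellsize=0.001,
                                     polarisation_frame=PolarisationFrame('stokesI'))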
Example 23
def deconvolve_cube_complex(dirty: Image, psf: Image, **kwargs) -> (Image, Image):
    """ Clean using the complex Hogbom algorithm for polarised data (2016MNRAS.462.3483P)
        
    The algorithm available is:
    hogbom-complex: See: Pratley L. & Johnston-Hollitt M., (2016), MNRAS, 462, 3483.
    
    This code is based upon the deconvolve_cube code for standard Hogbom clean available in ARL.
    
    Args:
    dirty (Image): The dirty image, i.e., the image to be deconvolved.
    psf (Image): The point spread function.
    window (float): Regions where clean components are allowed. If True, entire dirty Image is allowed.
    algorithm (str): Cleaning algorithm: 'hogbom-complex' only.
    gain (float): The "loop gain", i.e., the fraction of the brightest pixel that is removed in each iteration.
    threshold (float): Cleaning stops when the maximum of the absolute deviation of the residual is less than this value.
    niter (int): Maximum number of components to make if the threshold `thresh` is not hit.
    fractional_threshold (float): The predefined fractional threshold at which to stop cleaning.

    Returns:
    comp_image: clean component image.
    residual_image: residual image.
    """
    assert isinstance(dirty, Image), "Type is %s" % (type(dirty))
    assert isinstance(psf, Image), "Type is %s" % (type(psf))
    
    window_shape = get_parameter(kwargs, 'window_shape', None)
    if window_shape == 'quarter':
        qx = dirty.shape[3] // 4
        qy = dirty.shape[2] // 4
        window = np.zeros_like(dirty.data)
        window[..., (qy + 1):3 * qy, (qx + 1):3 * qx] = 1.0
        log.info('deconvolve_cube_complex: Cleaning inner quarter of each sky plane')
    else:
        window = None
    
    psf_support = get_parameter(kwargs, 'psf_support', None)
    if isinstance(psf_support, int):
        if (psf_support < psf.shape[2] // 2) and ((psf_support < psf.shape[3] // 2)):
            centre = [psf.shape[2] // 2, psf.shape[3] // 2]
            psf.data = psf.data[..., (centre[0] - psf_support):(centre[0] + psf_support),
                                (centre[1] - psf_support):(centre[1] + psf_support)]
            log.info('deconvolve_cube_complex: PSF support = +/- %d pixels' % (psf_support))

    algorithm = get_parameter(kwargs, 'algorithm', 'msclean')
    
    if algorithm == 'hogbom-complex':
        log.info("deconvolve_cube_complex: Hogbom-complex clean of each polarisation and channel separately")
        gain = get_parameter(kwargs, 'gain', 0.7)
        assert 0.0 < gain < 2.0, "Loop gain must be between 0 and 2"
        thresh = get_parameter(kwargs, 'threshold', 0.0)
        assert thresh >= 0.0
        niter = get_parameter(kwargs, 'niter', 100)
        assert niter > 0
        fracthresh = get_parameter(kwargs, 'fractional_threshold', 0.1)
        assert 0.0 <= fracthresh < 1.0
        
        comp_array = np.zeros(dirty.data.shape)
        residual_array = np.zeros(dirty.data.shape)
        for channel in range(dirty.data.shape[0]):
            for pol in range(dirty.data.shape[1]):
                if pol == 0 or pol == 3:
                    if psf.data[channel, pol, :, :].max():
                        log.info("deconvolve_cube_complex: Processing pol %d, channel %d" % (pol, channel))
                        if window is None:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, pol, :, :], residual_array[channel, pol, :, :] = \
                                hogbom(dirty.data[channel, pol, :, :], psf.data[channel, pol, :, :],
                                       window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info("deconvolve_cube_complex: Skipping pol %d, channel %d" % (pol, channel))
                if pol == 1:
                    if psf.data[channel, 1:2, :, :].max():
                        log.info("deconvolve_cube_complex: Processing pol 1 and 2, channel %d" % (channel))
                        if window is None:
                            comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :], psf.data[channel, 1, :, :], psf.data[channel, 2, :, :], None, gain, thresh, niter, fracthresh)
                        else:
                            comp_array[channel, 1, :, :], comp_array[channel, 2, :, :], residual_array[channel, 1, :, :], residual_array[channel, 2, :, :] = hogbom_complex(dirty.data[channel, 1, :, :], dirty.data[channel, 2, :, :], psf.data[channel, 1, :, :], psf.data[channel, 2, :, :], window[channel, pol, :, :], gain, thresh, niter, fracthresh)
                    else:
                        log.info("deconvolve_cube_complex: Skipping pol 1 and 2, channel %d" % (channel))
                if pol == 2:
                    continue
                
        comp_image = create_image_from_array(comp_array, dirty.wcs)
        residual_image = create_image_from_array(residual_array, dirty.wcs)

    else:
        raise ValueError('deconvolve_cube_complex: Unknown algorithm %s' % algorithm)
    
    return comp_image, residual_image
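
A hedged sketch of calling the complex Hogbom clean on existing dirty and psf Images; the keyword names follow the get_parameter lookups in the body above:

# Clean the inner quarter of each sky plane with the complex Hogbom algorithm.
comp, residual = deconvolve_cube_complex(dirty, psf, algorithm='hogbom-complex',
                                         niter=1000, gain=0.1, threshold=0.01,
                                         fractional_threshold=0.01,
                                         window_shape='quarter')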