Example #1
def create_vp_generic_numeric(model,
                              pointingcentre=None,
                              diameter=15.0,
                              blockage=0.0,
                              taper='gaussian',
                              edge=0.03162278,
                              zernikes=None,
                              padding=4,
                              use_local=True,
                              rho=0.0,
                              diff=0.0):
    """
    Make an image like model and fill it with the voltage pattern computed from an analytical model of the dish
    
    The elements of the analytical model are:
    - dish, optionally blocked
    - Gaussian taper, default is -12dB at the edge
    - Offset to pointing centre (optional)
    - zernikes in a list of dictionaries. Each list element is of the form {"coeff":0.1, "noll":5}. See aotools for
    more details
    - Output image can be in RA, DEC coordinates or AZELGEO coordinates (the default). use_local=True means to use
    AZELGEO coordinates centered on 0deg 0deg.
    
    The dish is zero padded according to padding and FFT'ed to get the voltage pattern.
    
    :param model:
    :param pointingcentre: SkyCoord of desired pointing centre
    :param diameter: Diameter of dish in metres
    :param blockage: Blockage of dish in metres
    :param taper: 'gaussian' or None
    :param edge: Value of the taper at the edge of the dish (default corresponds to -12dB)
    :param zernikes: Zernikes to be applied as phase across the dish (see above)
    :param padding: Pad the image by this amount
    :param use_local: Use local frame (AZELGEO)?
    :return: Voltage pattern as an Image
    """
    beam = create_empty_image_like(model)
    nchan, npol, ny, nx = beam.shape
    padded_shape = [nchan, npol, padding * ny, padding * nx]
    padded_beam = pad_image(beam, padded_shape)
    padded_beam.data = numpy.zeros(padded_beam.data.shape, dtype='complex')
    _, _, pny, pnx = padded_beam.shape

    xfr = fft_image(padded_beam)
    cx, cy = xfr.wcs.sub(2).wcs.crpix[0] - 1, xfr.wcs.sub(2).wcs.crpix[1] - 1

    for chan in range(nchan):

        # Look up the frequency of this channel from the spectral axis of the WCS
        frequency = xfr.wcs.sub(['spectral']).wcs_pix2world([chan], 0)[0]
        wavelength = const.c.to('m s^-1').value / frequency

        scalex = xfr.wcs.sub(2).wcs.cdelt[0] * wavelength
        scaley = xfr.wcs.sub(2).wcs.cdelt[1] * wavelength
        # xx, yy in metres
        xx, yy = numpy.meshgrid(scalex * (numpy.arange(pnx) - cx),
                                scaley * (numpy.arange(pny) - cy))

        # rr in metres
        rr = numpy.sqrt(xx**2 + yy**2)
        for pol in range(npol):
            xfr.data[chan, pol, ...] = tapered_disk(rr,
                                                    diameter / 2.0,
                                                    blockage=blockage / 2.0,
                                                    edge=edge,
                                                    taper=taper)

        if pointingcentre is not None:
            # Correct for pointing centre
            pcx, pcy = pointingcentre.to_pixel(padded_beam.wcs, origin=0)
            pxx, pyy = numpy.meshgrid((numpy.arange(pnx) - cx),
                                      (numpy.arange(pny) - cy))
            phase = 2 * numpy.pi * ((pcx - cx) * pxx / float(pnx) +
                                    (pcy - cy) * pyy / float(pny))
            for pol in range(npol):
                xfr.data[chan, pol, ...] *= numpy.exp(1j * phase)

        if isinstance(zernikes, collections.abc.Iterable):
            try:
                import aotools
            except ModuleNotFoundError:
                raise ModuleNotFoundError("aotools is not installed")

            ndisk = numpy.ceil(numpy.abs(diameter / scalex)).astype('int')[0]
            ndisk = 2 * ((ndisk + 1) // 2)
            phase = numpy.zeros([ndisk, ndisk])
            # Sum the phase contributions of all requested Zernike terms
            for zernike in zernikes:
                phase += zernike['coeff'] * \
                    aotools.functions.zernike.zernike_noll(zernike['noll'], ndisk)

            blc = pnx // 2 - ndisk // 2
            trc = pnx // 2 + ndisk // 2
            for pol in range(npol):
                xfr.data[chan, pol, blc:trc,
                         blc:trc] = xfr.data[chan, pol, blc:trc,
                                             blc:trc] * numpy.exp(1j * phase)

    padded_beam = fft_image(xfr, padded_beam)

    # Undo padding
    beam = create_empty_image_like(model)
    beam.data = padded_beam.data[...,
                                 (pny // 2 - ny // 2):(pny // 2 + ny // 2),
                                 (pnx // 2 - nx // 2):(pnx // 2 + nx // 2)]
    for chan in range(nchan):
        beam.data[chan, ...] /= numpy.max(numpy.abs(beam.data[chan, ...]))

    set_pb_header(beam, use_local=use_local)
    return beam
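
A minimal usage sketch for the function above, assuming a suitable Image template named model already exists (how it is built is library-specific and not shown); the pointing centre and the single Zernike term are illustrative values only:

from astropy.coordinates import SkyCoord
import astropy.units as u

# Illustrative pointing centre; any SkyCoord will do.
pointing = SkyCoord(ra=15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs')

# One dictionary per Zernike term, as described in the docstring:
# Noll index 5 with a phase coefficient of 0.1.
vp = create_vp_generic_numeric(model,
                               pointingcentre=pointing,
                               diameter=15.0,
                               blockage=1.0,
                               taper='gaussian',
                               zernikes=[{"coeff": 0.1, "noll": 5}],
                               padding=4,
                               use_local=True)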
Example #2
def create_vpterm_convolutionfunction(im,
                                      make_vp=None,
                                      oversampling=8,
                                      support=6,
                                      use_aaf=False,
                                      maxsupport=512,
                                      pa=None,
                                      normalise=True):
    """ Fill voltage pattern kernel projection kernel into a GridData.
    
    The makes the convolution function for gridding polarised data with a voltage
    pattern.

    :param im: Image template
    :param make_vp: Function to make the voltage pattern model image (hint: use a partial)
    :param oversampling: Oversampling of the convolution function in uv space
    :param support: Support (width in uv cells) of the convolution kernel
    :param use_aaf: Apply the prolate spheroidal (PSWF) anti-aliasing correction?
    :param maxsupport: Minimum size in pixels of the plane used to compute the kernel
    :param pa: Parallactic angle used when rotating the voltage pattern from AZELGEO to RA, Dec (None means 0.0)
    :param normalise: Normalise the kernel to unit sum?
    :return: griddata correction Image, griddata kernel as GridData
    """
    if oversampling % 2 == 0:
        oversampling += 1
        log.info("Setting oversampling to next greatest odd number {}".format(
            oversampling))

    d2r = numpy.pi / 180.0

    # We only need the griddata correction function for the PSWF so we make
    # it for the shape of the image
    nchan, npol, ony, onx = im.data.shape

    assert isinstance(im, Image)
    # Calculate the template convolution kernel.
    cf = create_convolutionfunction_from_image(im,
                                               oversampling=oversampling,
                                               support=support)

    cf_shape = list(cf.data.shape)
    cf.data = numpy.zeros(cf_shape).astype('complex')

    assert isinstance(oversampling, int)
    assert oversampling > 0

    nx = max(maxsupport, 2 * oversampling * support)
    ny = max(maxsupport, 2 * oversampling * support)

    qnx = nx // oversampling
    qny = ny // oversampling

    cf.data[...] = 0.0

    subim = copy_image(im)
    ccell = onx * numpy.abs(d2r * subim.wcs.wcs.cdelt[0]) / qnx

    subim.data = numpy.zeros([nchan, npol, qny, qnx])
    subim.wcs.wcs.cdelt[0] = -ccell / d2r
    subim.wcs.wcs.cdelt[1] = +ccell / d2r
    subim.wcs.wcs.crpix[0] = qnx // 2 + 1.0
    subim.wcs.wcs.crpix[1] = qny // 2 + 1.0

    vp = make_vp(subim)

    if pa is not None:
        rvp = convert_azelvp_to_radec(vp, subim, pa)
    else:
        rvp = convert_azelvp_to_radec(vp, subim, 0.0)

    if use_aaf:
        this_pswf_gcf, _ = create_pswf_convolutionfunction(subim,
                                                           oversampling=1,
                                                           support=6)
        rvp.data /= this_pswf_gcf.data

    # We might need to work with a larger image
    padded_shape = [nchan, npol, ny, nx]
    paddedplane = pad_image(rvp, padded_shape)
    paddedplane = fft_image(paddedplane)

    ycen, xcen = ny // 2, nx // 2
    for y in range(oversampling):
        ybeg = y + ycen + (support * oversampling) // 2 - oversampling // 2
        yend = y + ycen - (support * oversampling) // 2 - oversampling // 2
        # vv = range(ybeg, yend, -oversampling)
        for x in range(oversampling):
            xbeg = x + xcen + (support * oversampling) // 2 - oversampling // 2
            xend = x + xcen - (support * oversampling) // 2 - oversampling // 2

            # uu = range(xbeg, xend, -oversampling)
            cf.data[..., 0, y, x, :, :] = \
                paddedplane.data[..., ybeg:yend:-oversampling, xbeg:xend:-oversampling]

    if normalise:
        cf.data /= numpy.sum(
            numpy.real(cf.data[0, 0, 0, oversampling // 2,
                               oversampling // 2, :, :]))
    cf.data = numpy.conjugate(cf.data)

    if use_aaf:
        pswf_gcf, _ = create_pswf_convolutionfunction(im,
                                                      oversampling=1,
                                                      support=6)
    else:
        pswf_gcf = create_empty_image_like(im)
        pswf_gcf.data[...] = 1.0

    return pswf_gcf, cf
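
As the docstring hints, make_vp is normally supplied as a partial that freezes the dish parameters, so that it only needs the image template argument. A sketch under that assumption, reusing create_vp_generic_numeric from Example #1 and an existing image template im:

from functools import partial

# Freeze the dish geometry; the convolution-function builder calls
# make_vp(subim) with its own low-resolution template.
make_vp = partial(create_vp_generic_numeric,
                  diameter=15.0,
                  blockage=1.0,
                  taper='gaussian',
                  use_local=True)

# pswf_gcf is the griddata correction image, cf holds the oversampled kernels.
pswf_gcf, cf = create_vpterm_convolutionfunction(im,
                                                 make_vp=make_vp,
                                                 oversampling=9,
                                                 support=6,
                                                 use_aaf=True)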
Example #3
def get_test_image(self):
    testim = create_test_image(
        polarisation_frame=PolarisationFrame('stokesI'))
    return pad_image(testim, [1, 1, 512, 512])
Example #4
def create_awterm_convolutionfunction(im,
                                      make_pb=None,
                                      nw=1,
                                      wstep=1e15,
                                      oversampling=9,
                                      support=8,
                                      use_aaf=True,
                                      maxsupport=512,
                                      pa=None,
                                      normalise=True):
    """ Fill AW projection kernel into a GridData.

    :param im: Image template
    :param make_pb: Function to make the primary beam model image (hint: use a partial)
    :param nw: Number of w planes
    :param wstep: Step in w (wavelengths)
    :param oversampling: Oversampling of the convolution function in uv space
    :param support: Support (width in uv cells) of the convolution kernel
    :param use_aaf: Apply the prolate spheroidal (PSWF) anti-aliasing correction?
    :param maxsupport: Minimum size in pixels of the plane used to compute the kernel
    :param pa: Parallactic angle used when rotating the primary beam from AZELGEO to RA, Dec (None means 0.0)
    :param normalise: Normalise the kernels to unit sum?
    :return: griddata correction Image, griddata kernel as GridData
    """
    if oversampling % 2 == 0:
        oversampling += 1
        log.info("Setting oversampling to next greatest odd number {}".format(
            oversampling))

    d2r = numpy.pi / 180.0

    # We only need the griddata correction function for the PSWF so we make
    # it for the shape of the image
    nchan, npol, ony, onx = im.data.shape

    assert isinstance(im, Image)
    # Calculate the template convolution kernel.
    cf = create_convolutionfunction_from_image(im,
                                               oversampling=oversampling,
                                               support=support)

    cf_shape = list(cf.data.shape)
    assert nw > 0, "Number of w planes must be greater than zero"
    cf_shape[2] = nw
    cf.data = numpy.zeros(cf_shape).astype('complex')

    cf.grid_wcs.wcs.crpix[4] = nw // 2 + 1.0
    cf.grid_wcs.wcs.cdelt[4] = wstep
    cf.grid_wcs.wcs.ctype[4] = 'WW'
    if numpy.abs(wstep) > 0.0:
        w_list = cf.grid_wcs.sub([5]).wcs_pix2world(range(nw), 0)[0]
    else:
        w_list = [0.0]

    assert isinstance(oversampling, int)
    assert oversampling > 0

    nx = max(maxsupport, 2 * oversampling * support)
    ny = max(maxsupport, 2 * oversampling * support)

    qnx = nx // oversampling
    qny = ny // oversampling

    cf.data[...] = 0.0

    subim = copy_image(im)
    ccell = onx * numpy.abs(d2r * subim.wcs.wcs.cdelt[0]) / qnx

    subim.data = numpy.zeros([nchan, npol, qny, qnx])
    subim.wcs.wcs.cdelt[0] = -ccell / d2r
    subim.wcs.wcs.cdelt[1] = +ccell / d2r
    subim.wcs.wcs.crpix[0] = qnx // 2 + 1.0
    subim.wcs.wcs.crpix[1] = qny // 2 + 1.0

    if use_aaf:
        this_pswf_gcf, _ = create_pswf_convolutionfunction(subim,
                                                           oversampling=1,
                                                           support=6)
        norm = 1.0 / this_pswf_gcf.data
    else:
        norm = 1.0

    if make_pb is not None:
        pb = make_pb(subim)

        if pa is not None:
            rpb = convert_azelvp_to_radec(pb, subim, pa)
        else:
            rpb = convert_azelvp_to_radec(pb, subim, 0.0)

        norm *= rpb.data

    # We might need to work with a larger image
    padded_shape = [nchan, npol, ny, nx]
    thisplane = copy_image(subim)
    thisplane.data = numpy.zeros(thisplane.shape, dtype='complex')
    for z, w in enumerate(w_list):
        thisplane.data[...] = 0.0 + 0.0j
        thisplane = create_w_term_like(thisplane, w, dopol=True)
        thisplane.data *= norm
        paddedplane = pad_image(thisplane, padded_shape)
        paddedplane = fft_image(paddedplane)

        ycen, xcen = ny // 2, nx // 2
        for y in range(oversampling):
            ybeg = y + ycen + (support * oversampling) // 2 - oversampling // 2
            yend = y + ycen - (support * oversampling) // 2 - oversampling // 2
            # vv = range(ybeg, yend, -oversampling)
            for x in range(oversampling):
                xbeg = x + xcen + (support *
                                   oversampling) // 2 - oversampling // 2
                xend = x + xcen - (support *
                                   oversampling) // 2 - oversampling // 2

                # uu = range(xbeg, xend, -oversampling)
                cf.data[..., z, y,
                        x, :, :] = paddedplane.data[...,
                                                    ybeg:yend:-oversampling,
                                                    xbeg:xend:-oversampling]
                # for chan in range(nchan):
                #     for pol in range(npol):
                #         cf.data[chan, pol, z, y, x, :, :] = paddedplane.data[chan, pol, :, :][vv, :][:, uu]

    if normalise:
        norm = numpy.zeros([nchan, npol, oversampling, oversampling])
        for y in range(oversampling):
            for x in range(oversampling):
                # uu = range(xbeg, xend, -oversampling)
                norm[..., y, x] = numpy.sum(numpy.real(cf.data[:, :, 0, y,
                                                               x, :, :]),
                                            axis=(-2, -1))
        for z, _ in enumerate(w_list):
            for y in range(oversampling):
                for x in range(oversampling):
                    cf.data[:, :, z, y, x] /= norm[..., y,
                                                   x][..., numpy.newaxis,
                                                      numpy.newaxis]
    cf.data = numpy.conjugate(cf.data)

    if use_aaf:
        pswf_gcf, _ = create_pswf_convolutionfunction(im,
                                                      oversampling=1,
                                                      support=6)
    else:
        pswf_gcf = create_empty_image_like(im)
        pswf_gcf.data[...] = 1.0

    return pswf_gcf, cf
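
The same pattern applies here: make_pb is usually a partial over whatever primary-beam factory the surrounding library provides, and nw and wstep are chosen to cover the w-range of the visibilities. A sketch with an assumed factory name (make_pb_image is a hypothetical placeholder, not a function defined above):

from functools import partial

# Hypothetical primary-beam factory: it must accept an Image template and
# return the primary beam evaluated on that template.
make_pb = partial(make_pb_image, diameter=15.0)

# 15 w planes spaced 100 wavelengths apart; pswf_gcf corrects the image
# after gridding, cf is handed to the gridder.
pswf_gcf, cf = create_awterm_convolutionfunction(im,
                                                 make_pb=make_pb,
                                                 nw=15,
                                                 wstep=100.0,
                                                 oversampling=9,
                                                 support=8,
                                                 use_aaf=True)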