Example #1
 def test_average_chunks2_1d_trans(self):
     arr = numpy.linspace(0.0, 100.0, 11).reshape([11, 1])  # pylint: disable=no-member
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks2(arr, wts, (2, 1))
     assert len(carr) == len(cwts)
     answerarr = numpy.array([[5.], [25.], [45.], [65.0], [85.0], [100.0]])
     answerwts = numpy.array([[2.0], [2.0], [2.0], [2.0], [2.0], [1.0]])
     numpy.testing.assert_array_equal(carr, answerarr)
     numpy.testing.assert_array_equal(cwts, answerwts)
Example #2
 def test_average_chunks2_2d(self):
     arr = numpy.linspace(0.0, 120.0, 121).reshape(11, 11)  # pylint: disable=no-member
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks2(arr, wts, (5, 2))
     assert len(carr) == len(cwts)
     answerarr = numpy.array([32., 87., 120.])
     answerwts = numpy.array([5., 5., 1.])
     numpy.testing.assert_array_equal(carr[:, 5], answerarr)
     numpy.testing.assert_array_equal(cwts[:, 5], answerwts)
Example #3
 def test_average_chunks2_1d(self):
     arr = numpy.linspace(0.0, 100.0, 11).reshape([1, 11])  # pylint: disable=no-member
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks2(arr, wts, (1, 2))
     assert len(carr) == len(cwts)
     answerarr = numpy.array([[5., 25., 45., 65.0, 85.0, 100.0]])
     answerwts = numpy.array([[2.0, 2.0, 2.0, 2.0, 2.0, 1.0]])
     numpy.testing.assert_array_equal(carr, answerarr)
     numpy.testing.assert_array_equal(cwts, answerwts)
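
The three tests above pin down the contract of average_chunks2: it performs a weighted average over non-overlapping chunks along each axis of a 2D array, the trailing chunk may be partial, and it returns both the chunk averages and the accumulated weights. Below is a minimal sketch of that behaviour inferred from the tests; it is an illustration, not the rascil implementation:

import numpy

def average_chunks2_sketch(arr, wts, chunksize):
    """Weighted chunk averaging over both axes of a 2D array (illustrative)."""
    def chunk_axis0(a, w, size):
        # Average rows in groups of `size`; the final group may be partial.
        starts = range(0, a.shape[0], size)
        avg = numpy.array([(a[i:i + size] * w[i:i + size]).sum(axis=0)
                           / numpy.maximum(w[i:i + size].sum(axis=0), 1e-15)
                           for i in starts])  # 1e-15 guards against zero weight
        wsum = numpy.array([w[i:i + size].sum(axis=0) for i in starts])
        return avg, wsum

    # Chunk along axis 0, then transpose and chunk along the other axis;
    # weighting the second pass by the first-pass weight sums preserves
    # the overall weighted average.
    carr, cwts = chunk_axis0(arr, wts, chunksize[0])
    carr, cwts = chunk_axis0(carr.T, cwts.T, chunksize[1])
    return carr.T, cwts.T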
Example #4
def calculate_averaged_correlation(correlation, channel_width, time_width):
    """ Average the correlation in time and frequency

    :param correlation: Correlation array [nant, nant, ntimes, nchan]
    :param channel_width: Number of channels to average
    :param time_width: Number of integrations to average
    :return: Averaged correlation
    """
    wts = numpy.ones(correlation.shape, dtype='float')
    return average_chunks2(correlation, wts, (channel_width, time_width))[0]
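
A hypothetical invocation with made-up shapes: the tests above exercise average_chunks2 on 2D arrays, so a single-baseline slab with axis 0 = channel and axis 1 = time is used here, matching the (channel_width, time_width) chunk order:

import numpy

# Illustrative inputs only: 16 channels, 32 integrations, complex correlation.
correlation = numpy.ones([16, 32], dtype='complex')
averaged = calculate_averaged_correlation(correlation, channel_width=4, time_width=8)
print(averaged.shape)  # expect (4, 4) when both axes divide evenly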
Example #5
File: coalesce.py  Project: Yonhua/rascil
 def sum_from_grid(arr):
     result = average_chunks2(
         arr, allpwtsgrid[:, a2, a1, :],
         (time_average[a2, a1], frequency_average[a2, a1]))
     return result[0] * result[0].size
Example #6
File: coalesce.py  Project: Yonhua/rascil
 def average_from_grid(arr):
     return average_chunks2(
         arr, allpwtsgrid[:, a2, a1, :],
         (time_average[a2, a1], frequency_average[a2, a1]))[0]
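
Examples #5 and #6 differ only in post-processing: both call average_chunks2 with the per-baseline weights and averaging factors, but sum_from_grid rescales the chunk averages by the number of output elements (result[0].size) because, per the comment in Example #7, some variables need the sum rather than the average. A self-contained illustration of that rescaling step, with made-up inputs standing in for the closed-over allpwtsgrid, time_average, and frequency_average:

import numpy

arr = numpy.arange(12.0).reshape(4, 3)   # e.g. an integration_time grid
wts = numpy.ones_like(arr)

averaged, _ = average_chunks2(arr, wts, (2, 3))  # 2 in time, 3 in frequency
summed = averaged * averaged.size                # the sum_from_grid rescaling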
Example #7
File: coalesce.py  Project: Yonhua/rascil
def average_in_blocks(vis,
                      uvw,
                      wts,
                      imaging_wts,
                      times,
                      integration_time,
                      frequency,
                      channel_bandwidth,
                      time_coal=1.0,
                      max_time_coal=100,
                      frequency_coal=1.0,
                      max_frequency_coal=100):
    """ Average visibility in blocks
    
    :param vis:
    :param uvw:
    :param wts:
    :param imaging_wts:
    :param times:
    :param integration_time:
    :param frequency:
    :param channel_bandwidth:
    :param time_coal:
    :param max_time_coal:
    :param frequency_coal:
    :param max_frequency_coal:
    :return:
    """
    # Calculate the averaging factors for time and frequency, keeping them the
    # same for all times on a given baseline.
    # Find the maximum possible baseline and then scale relative to it.

    # The input visibility is a block of shape [ntimes, nant, nant, nchan, npol]. We will map this
    # into rows of vis[npol], with additional columns for antenna1, antenna2, and frequency.

    ntimes, nant, _, nchan, npol = vis.shape

    times = times.astype(numpy.float64)  # ensure float64 without reinterpreting the buffer

    # Original
    # Pol independent weighting
    # allpwtsgrid = numpy.sum(wts, axis=4)
    # # Pol and frequency independent weighting
    # allcpwtsgrid = numpy.sum(allpwtsgrid, axis=3)
    # # Pol and time independent weighting
    # alltpwtsgrid = numpy.sum(allpwtsgrid, axis=0)

    # Optimized
    allpwtsgrid = numpy.einsum('ijklm->ijkl', wts, optimize=True)
    allcpwtsgrid = numpy.einsum('ijkl->ijk', allpwtsgrid, optimize=True)
    alltpwtsgrid = numpy.einsum('ijkl->jkl', allpwtsgrid, optimize=True)
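    # The einsum reductions above are equivalent to the commented-out numpy.sum
    # calls, e.g. numpy.einsum('ijklm->ijkl', wts) == numpy.sum(wts, axis=4);
    # einsum with optimize=True avoids intermediate temporaries on large blocks.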

    # Now calculate, on a baseline basis, the time and frequency averaging. We do this by looking at
    # the maximum uv distance for all data and for a given baseline. The integration time and
    # channel bandwidth are scaled appropriately.
    time_average = numpy.ones([nant, nant], dtype='int')
    frequency_average = numpy.ones([nant, nant], dtype='int')
    ua = numpy.arange(nant)

    # Original
    # uvmax = numpy.sqrt(numpy.max(uvw[..., 0] ** 2 + uvw[..., 1] ** 2 + uvw[..., 2] ** 2))
    # for a2 in ua:
    #     for a1 in ua:
    #         if allpwtsgrid[:, a2, a1, :].any() > 0.0:
    #             uvdist = numpy.max(numpy.sqrt(uvw[:, a2, a1, 0] ** 2 + uvw[:, a2, a1, 1] ** 2), axis=0)
    #             if uvdist > 0.0:
    #                 time_average[a2, a1] = min(max_time_coal,
    #                                            max(1, int(round((time_coal * uvmax / uvdist)))))
    #                 frequency_average[a2, a1] = min(max_frequency_coal,
    #                                                 max(1, int(round(frequency_coal * uvmax / uvdist))))
    #             else:
    #                 time_average[a2, a1] = max_time_coal
    #                 frequency_average[a2, a1] = max_frequency_coal

    # Optimized
    # Calculate uvdist instead of uvwdist
    uvwd = uvw[..., 0:2]
    uvdist = numpy.einsum('ijkm,ijkm->ijk', uvwd, uvwd, optimize=True)
    uvmax = numpy.sqrt(numpy.max(uvdist))

    # uvdist = numpy.sqrt(numpy.einsum('ijkm,ijkm->ijk', uvw, uvw, optimize=True))
    uvdist_max = numpy.sqrt(numpy.max(uvdist, axis=0))

    allpwtsgrid_bool = numpy.einsum('ijklm->jk', wts, optimize=True)
    mask = numpy.where(uvdist_max > 0.)
    mask0 = numpy.where(uvdist_max <= 0.)
    time_average[mask] = numpy.round(time_coal * uvmax / uvdist_max[mask])
    time_average = time_average.astype(numpy.int64)
    time_average[mask0] = max_time_coal
    numpy.putmask(time_average, allpwtsgrid_bool == 0, 0)
    numpy.putmask(time_average, time_average < 1, 1)
    numpy.putmask(time_average, time_average > max_time_coal, max_time_coal)
    frequency_average[mask] = numpy.round(
        frequency_coal * uvmax / uvdist_max[mask])
    frequency_average = frequency_average.astype(numpy.int64)
    frequency_average[mask0] = max_frequency_coal
    numpy.putmask(frequency_average, allpwtsgrid_bool == 0, 0)
    numpy.putmask(frequency_average, frequency_average < 1, 1)
    numpy.putmask(frequency_average, frequency_average > max_frequency_coal,
                  max_frequency_coal)

    # See how many time and frequency chunks we need for each baseline. To do this we use the same
    # averaging that we will use later for the actual data. This tells us the number of chunks
    # required for each baseline.
    frequency_grid, time_grid = numpy.meshgrid(frequency, times)
    channel_bandwidth_grid, integration_time_grid = numpy.meshgrid(
        channel_bandwidth, integration_time)
    cnvis = 0
    time_chunk_len = numpy.ones([nant, nant], dtype='int')
    frequency_chunk_len = numpy.ones([nant, nant], dtype='int')

    for a2 in ua:
        for a1 in ua:
            if (time_average[a2, a1] > 0) and \
                    (frequency_average[a2, a1] > 0) and \
                    allpwtsgrid[:, a2, a1, ...].any():
                time_chunks, _ = average_chunks(times, allcpwtsgrid[:, a2, a1],
                                                time_average[a2, a1])
                time_chunk_len[a2, a1] = time_chunks.shape[0]
                frequency_chunks, _ = average_chunks(frequency,
                                                     alltpwtsgrid[a2, a1, :],
                                                     frequency_average[a2, a1])
                frequency_chunk_len[a2, a1] = frequency_chunks.shape[0]
                nrows = time_chunk_len[a2, a1] * frequency_chunk_len[a2, a1]
                cnvis += nrows

    # Now we know enough to define the output coalesced arrays. Rows are ordered by successive
    # (a2, a1) pairs, each contributing time_chunk_len[a2, a1] * frequency_chunk_len[a2, a1] rows.
    ctime = numpy.zeros([cnvis])
    cfrequency = numpy.zeros([cnvis])
    cchannel_bandwidth = numpy.zeros([cnvis])
    cvis = numpy.zeros([cnvis, npol], dtype='complex')
    cwts = numpy.zeros([cnvis, npol])
    cimwts = numpy.zeros([cnvis, npol])
    cuvw = numpy.zeros([cnvis, 3])
    ca1 = numpy.zeros([cnvis], dtype='int')
    ca2 = numpy.zeros([cnvis], dtype='int')
    cintegration_time = numpy.zeros([cnvis])

    # For decoalescence we keep an index to map back to the original BlockVisibility
    rowgrid = numpy.zeros([ntimes, nant, nant, nchan], dtype='int')
    rowgrid.flat = range(rowgrid.size)

    cindex = numpy.zeros([rowgrid.size], dtype='int')

    # Now go through, chunking up the various arrays. Everything is converted into an array with
    # axes [time, channel] and then it is averaged over time and frequency chunks for
    # this baseline.
    # To aid decoalescence we will need an index of which output element a given input element
    # contributes to. This is a many-to-one mapping. Decoalescence then just consists of using
    # this index to extract the coalesced value that a given input element contributed towards.
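    # Sketch of the decoalescence this enables (illustrative, not the rascil API):
    #     decoalesced_values = coalesced_values[cindex]
    # where cindex maps each flattened (time, a2, a1, chan) position to the
    # coalesced row it contributed to.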

    visstart = 0
    for a2 in ua:
        for a1 in ua:
            nrows = time_chunk_len[a2, a1] * frequency_chunk_len[a2, a1]
            rows = slice(visstart, visstart + nrows)

            cindex.flat[rowgrid[:, a2, a1, :]] = numpy.array(
                range(visstart, visstart + nrows))

            ca1[rows] = a1
            ca2[rows] = a2

            # Average over time and frequency for case where polarisation isn't an issue
            def average_from_grid(arr):
                return average_chunks2(
                    arr, allpwtsgrid[:, a2, a1, :],
                    (time_average[a2, a1], frequency_average[a2, a1]))[0]

            ctime[rows] = average_from_grid(time_grid).flatten()
            cfrequency[rows] = average_from_grid(frequency_grid).flatten()

            for axis in range(3):
                uvwgrid = numpy.outer(uvw[:, a2, a1, axis],
                                      frequency / constants.c.value)
                cuvw[rows, axis] = average_from_grid(uvwgrid).flatten()

            # For some variables, we need the sum not the average
            def sum_from_grid(arr):
                result = average_chunks2(
                    arr, allpwtsgrid[:, a2, a1, :],
                    (time_average[a2, a1], frequency_average[a2, a1]))
                return result[0] * result[0].size

            cintegration_time[rows] = sum_from_grid(
                integration_time_grid).flatten()
            cchannel_bandwidth[rows] = sum_from_grid(
                channel_bandwidth_grid).flatten()

            # For the polarisations we have to perform the time-frequency average separately for each polarisation
            for pol in range(npol):
                result = average_chunks2(
                    vis[:, a2, a1, :, pol], wts[:, a2, a1, :, pol],
                    (time_average[a2, a1], frequency_average[a2, a1]))
                cvis[rows, pol], cwts[
                    rows, pol] = result[0].flatten(), result[1].flatten()

            # Now do the imaging weights
            for pol in range(npol):
                result = average_chunks2(
                    imaging_wts[:, a2, a1, :, pol], wts[:, a2, a1, :, pol],
                    (time_average[a2, a1], frequency_average[a2, a1]))
                cimwts[rows, pol] = result[0].flatten()

            visstart += nrows

    assert cnvis == visstart, "Mismatch between number of rows in coalesced visibility %d and index %d" % \
                              (cnvis, visstart)

    return cvis, cuvw, cwts, cimwts, ctime, cfrequency, cchannel_bandwidth, ca1, ca2, cintegration_time, cindex
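
For completeness, a hedged end-to-end sketch of calling average_in_blocks. All shapes and values here are assumptions for illustration; the function itself additionally needs average_chunks, average_chunks2, and astropy's constants in scope, which this snippet does not provide:

import numpy

# Hypothetical dimensions: 3 integrations, 2 antennas, 4 channels, 1 polarisation.
ntimes, nant, nchan, npol = 3, 2, 4, 1
vis = numpy.ones([ntimes, nant, nant, nchan, npol], dtype='complex')
wts = numpy.ones(vis.shape, dtype='float')
imaging_wts = numpy.ones_like(wts)
uvw = numpy.random.default_rng(1).normal(scale=100.0, size=[ntimes, nant, nant, 3])
times = numpy.linspace(0.0, 30.0, ntimes)
integration_time = numpy.full(ntimes, 10.0)
frequency = numpy.linspace(1.0e8, 1.3e8, nchan)
channel_bandwidth = numpy.full(nchan, 1.0e7)

(cvis, cuvw, cwts, cimwts, ctime, cfrequency, cchannel_bandwidth,
 ca1, ca2, cintegration_time, cindex) = average_in_blocks(
    vis, uvw, wts, imaging_wts, times, integration_time,
    frequency, channel_bandwidth)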