Example #1
 def test_average_chunks_jit(self):
     arr = numpy.linspace(0.0, 100.0, 11)
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks(arr, wts, 2)
     carr_jit, cwts_jit = average_chunks_jit(arr, wts, 2)
     numpy.testing.assert_array_equal(carr, carr_jit)
     numpy.testing.assert_array_equal(cwts, cwts_jit)
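If average_chunks_jit is simply a Numba-compiled build of the same routine, the pair compared above can be produced as in this minimal sketch (the numba.njit usage is an assumption, not the library's source; it presumes average_chunks is imported and Numba-compatible — a possible body for it is sketched after Example #4):

import numba

# Hypothetical: derive the jitted variant from the plain function, so the test
# above compares one implementation against its own compiled build.
average_chunks_jit = numba.njit(average_chunks)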
Example #2
 def test_average_chunks_zero(self):
     arr = numpy.linspace(0.0, 90.0, 10)
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks(arr, wts, 0)
     assert len(carr) == len(cwts)
     numpy.testing.assert_array_equal(carr, arr)
     numpy.testing.assert_array_equal(cwts, wts)
Example #3
 def test_average_chunks_single(self):
     arr = numpy.linspace(0.0, 100.0, 11)
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks(arr, wts, 12)
     assert len(carr) == len(cwts)
     answerarr = numpy.array([50.0])
     answerwts = numpy.array([11.0])
     numpy.testing.assert_array_equal(carr, answerarr)
     numpy.testing.assert_array_equal(cwts, answerwts)
Example #4
 def test_average_chunks_exact(self):
     arr = numpy.linspace(0.0, 90.0, 10)
     wts = numpy.ones_like(arr)
     carr, cwts = average_chunks(arr, wts, 2)
     assert len(carr) == len(cwts)
     answerarr = numpy.array([5., 25., 45., 65.0, 85.0])
     answerwts = numpy.array([2.0, 2.0, 2.0, 2.0, 2.0])
     numpy.testing.assert_array_equal(carr, answerarr)
     numpy.testing.assert_array_equal(cwts, answerwts)
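Taken together, these tests pin down the contract of average_chunks: a chunk size of 0 (or 1) passes the inputs through unchanged, a chunk size at least as large as the array collapses it to a single weighted mean, and otherwise each chunk yields its weighted mean together with the sum of its weights. A minimal NumPy sketch consistent with those assertions (illustrative only, not the library's actual source):

import numpy

def average_chunks(arr, wts, chunksize):
    # Degenerate chunk sizes are a pass-through, as test_average_chunks_zero expects.
    if chunksize <= 1:
        return arr, wts
    nchunks = (len(arr) + chunksize - 1) // chunksize
    carr = numpy.zeros(nchunks)
    cwts = numpy.zeros(nchunks)
    for i in range(len(arr)):
        carr[i // chunksize] += wts[i] * arr[i]
        cwts[i // chunksize] += wts[i]
    # Weighted mean per chunk; assumes every chunk has non-zero total weight.
    return carr / cwts, cwts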
Example #5
def average_in_blocks(vis,
                      uvw,
                      wts,
                      times,
                      integration_time,
                      frequency,
                      channel_bandwidth,
                      time_coal=1.0,
                      max_time_coal=100,
                      frequency_coal=1.0,
                      max_frequency_coal=100):
    # Calculate the averaging factors for time and frequency, making them the same for all times
    # on a given baseline. Find the maximum possible baseline length and scale the averaging
    # to that.

    # The input visibility is a block of shape [ntimes, nant, nant, nchan, npol]. We will map this
    # into rows like vis[npol] and with additional columns antenna1, antenna2, frequency

    ntimes, nant, _, nchan, npol = vis.shape

    # Pol independent weighting
    allpwtsgrid = numpy.sum(wts, axis=4)
    # Pol and frequency independent weighting
    allcpwtsgrid = numpy.sum(allpwtsgrid, axis=3)
    # Pol and time independent weighting
    alltpwtsgrid = numpy.sum(allpwtsgrid, axis=0)

    # Now calculate, on a per-baseline basis, the time and frequency averaging. We do this by
    # looking at the maximum uv distance for all data and for a given baseline. The integration
    # time and channel bandwidth are scaled appropriately.
    uvmax = numpy.sqrt(numpy.max(uvw[..., 0]**2 + uvw[..., 1]**2 + uvw[..., 2]**2))
    time_average = numpy.ones([nant, nant], dtype='int')
    frequency_average = numpy.ones([nant, nant], dtype='int')
    ua = numpy.arange(nant)
    for a2 in ua:
        for a1 in ua:
            if (allpwtsgrid[:, a2, a1, :] > 0.0).any():
                uvdist = numpy.max(numpy.sqrt(uvw[:, a2, a1, 0]**2 +
                                              uvw[:, a2, a1, 1]**2),
                                   axis=0)
                if uvdist > 0.0:
                    time_average[a2, a1] = min(
                        max_time_coal,
                        max(1, int(round((time_coal * uvmax / uvdist)))))
                    frequency_average[a2, a1] = min(
                        max_frequency_coal,
                        max(1, int(round(frequency_coal * uvmax / uvdist))))
                else:
                    time_average[a2, a1] = max_time_coal
                    frequency_average[a2, a1] = max_frequency_coal

    # See how many time and frequency chunks we need for each baseline. To do this we use the same
    # averaging that we will use later for the actual data. This tells us the number of chunks
    # required for each baseline.
    frequency_grid, time_grid = numpy.meshgrid(frequency, times)
    channel_bandwidth_grid, integration_time_grid = numpy.meshgrid(
        channel_bandwidth, integration_time)
    cnvis = 0
    time_chunk_len = numpy.ones([nant, nant], dtype='int')
    frequency_chunk_len = numpy.ones([nant, nant], dtype='int')
    for a2 in ua:
        for a1 in ua:
            if (time_average[a2, a1] > 0) and (frequency_average[a2, a1] > 0) \
                    and (allpwtsgrid[:, a2, a1, :] > 0.0).any():
                time_chunks, _ = average_chunks(times, allcpwtsgrid[:, a2, a1],
                                                time_average[a2, a1])
                time_chunk_len[a2, a1] = time_chunks.shape[0]
                frequency_chunks, _ = average_chunks(frequency,
                                                     alltpwtsgrid[a2, a1, :],
                                                     frequency_average[a2, a1])
                frequency_chunk_len[a2, a1] = frequency_chunks.shape[0]
                nrows = time_chunk_len[a2, a1] * frequency_chunk_len[a2, a1]
                cnvis += nrows

    # Now we know enough to define the output coalesced arrays. The shape will be
    # successive in a1, a2: [len_time_chunks[a2, a1], a2, a1, len_frequency_chunks[a2, a1]]
    ctime = numpy.zeros([cnvis])
    cfrequency = numpy.zeros([cnvis])
    cchannel_bandwidth = numpy.zeros([cnvis])
    cvis = numpy.zeros([cnvis, npol], dtype='complex')
    cwts = numpy.zeros([cnvis, npol])
    cuvw = numpy.zeros([cnvis, 3])
    ca1 = numpy.zeros([cnvis], dtype='int')
    ca2 = numpy.zeros([cnvis], dtype='int')
    cintegration_time = numpy.zeros([cnvis])

    # For decoalescence we keep an index to map back to the original BlockVisibility
    rowgrid = numpy.zeros([ntimes, nant, nant, nchan], dtype='int')
    rowgrid.flat = range(rowgrid.size)

    cindex = numpy.zeros([rowgrid.size], dtype='int')

    # Now go through, chunking up the various arrays. Everything is converted into an array with
    # axes [time, channel] and then it is averaged over time and frequency chunks for
    # this baseline.
    # To aid decoalescence we will need an index of which output element a given input element
    # contributes to. This is a many-to-one mapping. Decoalescence then just consists of using
    # this index to extract the coalesced value that a given input element contributed towards.

    visstart = 0
    for a2 in ua:
        for a1 in ua:
            if (time_chunk_len[a2, a1] > 0) and (frequency_chunk_len[a2, a1] > 0) and \
                    (allpwtsgrid[:, a2, a1, :] > 0.0).any():

                nrows = time_chunk_len[a2, a1] * frequency_chunk_len[a2, a1]
                rows = slice(visstart, visstart + nrows)

                cindex.flat[rowgrid[:, a2, a1, :]] = numpy.arange(
                    visstart, visstart + nrows)

                ca1[rows] = a1
                ca2[rows] = a2

                # Average over time and frequency for case where polarisation isn't an issue
                def average_from_grid(arr):
                    return average_chunks2(
                        arr, allpwtsgrid[:, a2, a1, :],
                        (time_average[a2, a1], frequency_average[a2, a1]))[0]

                ctime[rows] = average_from_grid(time_grid).flatten()
                cfrequency[rows] = average_from_grid(frequency_grid).flatten()

                for axis in range(3):
                    uvwgrid = numpy.outer(uvw[:, a2, a1, axis],
                                          frequency / constants.c.value)
                    cuvw[rows, axis] = average_from_grid(uvwgrid).flatten()

                # For some variables, we need the sum not the average
                def sum_from_grid(arr):
                    result = average_chunks2(
                        arr, allpwtsgrid[:, a2, a1, :],
                        (time_average[a2, a1], frequency_average[a2, a1]))
                    return result[0] * result[0].size

                cintegration_time[rows] = sum_from_grid(
                    integration_time_grid).flatten()
                cchannel_bandwidth[rows] = sum_from_grid(
                    channel_bandwidth_grid).flatten()

                # For the polarisations we have to perform the time-frequency average separately for each polarisation
                for pol in range(npol):
                    result = average_chunks2(
                        vis[:, a2, a1, :, pol], wts[:, a2, a1, :, pol],
                        (time_average[a2, a1], frequency_average[a2, a1]))
                    cvis[rows, pol], cwts[
                        rows, pol] = result[0].flatten(), result[1].flatten()

                visstart += nrows

    assert cnvis == visstart, "Mismatch between number of rows in coalesced visibility and index"

    return cvis, cuvw, cwts, ctime, cfrequency, cchannel_bandwidth, ca1, ca2, cintegration_time, cindex
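Shapes can be read directly off the indexing above: vis and wts are [ntimes, nant, nant, nchan, npol], uvw is [ntimes, nant, nant, 3], times and integration_time are per-integration vectors, and frequency and channel_bandwidth are per-channel vectors. A toy call under those assumptions (it presumes average_chunks, average_chunks2 and astropy's constants are importable alongside the function; the values are made up):

import numpy

ntimes, nant, nchan, npol = 4, 3, 8, 1
vis = numpy.ones([ntimes, nant, nant, nchan, npol], dtype='complex')
wts = numpy.ones([ntimes, nant, nant, nchan, npol])
uvw = numpy.random.default_rng(0).normal(0.0, 100.0, [ntimes, nant, nant, 3])
times = numpy.linspace(0.0, 30.0, ntimes)
integration_time = 10.0 * numpy.ones(ntimes)
frequency = numpy.linspace(1.0e8, 1.1e8, nchan)
channel_bandwidth = 1.25e6 * numpy.ones(nchan)

(cvis, cuvw, cwts, ctime, cfrequency, cchannel_bandwidth,
 ca1, ca2, cintegration_time, cindex) = average_in_blocks(
    vis, uvw, wts, times, integration_time, frequency, channel_bandwidth)

# Every output is row-oriented: one row per (baseline, time chunk, frequency chunk).
assert cvis.shape[0] == ctime.shape[0] == cuvw.shape[0]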
Example #6
def simulate_rfi_image(config, times, frequency, channel_bandwidth,
                       phasecentre, polarisation_frame, time_average,
                       channel_average, attenuation, noise, emitter_location,
                       emitter_power, use_pole, waterfall, write_ms):
    averaged_frequency = numpy.array(
        average_chunks(frequency, numpy.ones_like(frequency),
                       channel_average))[0]
    # average_chunks returns (averages, weight sums); multiplying the averaged
    # bandwidth by the per-chunk weights converts it back into the total
    # bandwidth of each averaged channel.
    averaged_channel_bandwidth, wts = numpy.array(
        average_chunks(channel_bandwidth, numpy.ones_like(frequency),
                       channel_average))
    averaged_channel_bandwidth *= wts
    averaged_times = numpy.array(
        average_chunks(times, numpy.ones_like(times), time_average))[0]

    # Seconds of time to radians of hour angle: pi radians per 43200 s (12 h)
    s2r = numpy.pi / 43200.0
    bvis = create_blockvisibility(config,
                                  s2r * times,
                                  frequency,
                                  channel_bandwidth=channel_bandwidth,
                                  phasecentre=phasecentre,
                                  polarisation_frame=polarisation_frame,
                                  zerow=False)

    bvis = simulate_rfi_block(bvis,
                              emitter_location=emitter_location,
                              emitter_power=emitter_power,
                              attenuation=attenuation,
                              use_pole=use_pole)

    if noise:
        bvis = add_noise(bvis)

    if waterfall:
        plot_waterfall(bvis)

    if write_ms:
        msname = "simulate_rfi_%.1f.ms" % (times[0])
        export_blockvisibility_to_ms(msname, [bvis], "RFI")

    averaged_bvis = create_blockvisibility(
        config,
        s2r * averaged_times,
        averaged_frequency,
        channel_bandwidth=averaged_channel_bandwidth,
        phasecentre=phasecentre,
        polarisation_frame=polarisation_frame,
        zerow=False)
    # The antenna count follows from the visibility shape [ntimes, nant, nant, nchan, npol];
    # only the first polarisation is averaged here.
    nants = bvis.data['vis'].shape[1]
    npol = 1
    for itime, _ in enumerate(averaged_times):
        atime = itime * time_average
        for ant2 in range(nants):
            for ant1 in range(ant2, nants):
                for ichan, _ in enumerate(averaged_frequency):
                    achan = ichan * channel_average
                    for pol in range(npol):
                        averaged_bvis.data['vis'][itime, ant2, ant1, ichan, pol] = \
                            calculate_averaged_correlation(
                                bvis.data['vis'][atime:(atime+time_average), ant2, ant1, achan:(achan+channel_average), pol],
                                time_average, channel_average)[0,0]
                        averaged_bvis.data['vis'][itime, ant1, ant2, ichan, pol] = \
                            numpy.conjugate(averaged_bvis.data['vis'][itime, ant2, ant1, ichan, pol])

    del bvis

    if noise:
        averaged_bvis = add_noise(averaged_bvis)

    return averaged_bvis
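calculate_averaged_correlation is used above to reduce a [time, channel] block of correlations to its chunk averages. A plausible sketch in terms of average_chunks2, with uniform weights (an assumption consistent with the call site, not necessarily the library's exact source):

import numpy

def calculate_averaged_correlation(correlation, time_average, channel_average):
    # Hypothetical sketch: average a [time, channel] block of correlations in
    # chunks of the given sizes. The caller above takes element [0, 0], i.e.
    # the average over the whole block.
    wts = numpy.ones(correlation.shape, dtype='float')
    return average_chunks2(correlation, wts, (time_average, channel_average))[0]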
Example #7
                               nintegrations_per_chunk * integration_time)
    print("Start times", start_times)
    results = list()
    pole_results = list()

    chunk_start_times = [
        start_times[i:i + args.ngroup_visibility]
        for i in range(0, len(start_times), args.ngroup_visibility)
    ]
    print("Chunk start times", [c[0] for c in chunk_start_times])

    dopsf = args.do_psf == "True"

    # Find the average frequencies
    averaged_frequency = numpy.array(
        average_chunks(frequency, numpy.ones_like(frequency),
                       channel_average))[0]
    if len(averaged_frequency) > 1:
        step = abs(averaged_frequency[-1] -
                   averaged_frequency[0]) / (len(averaged_frequency) - 1)
    else:
        step = channel_average * channel_bandwidth
    averaged_channel_bandwidth = step * numpy.ones_like(averaged_frequency)

    print("Each averaged chunk has %d integrations of duration %.2f (s)" %
          (nintegrations_per_chunk // time_average,
           time_average * integration_time))
    print("Each averaged chunk has %d channels of width %.3f (MHz)" %
          (len(averaged_frequency), 1e-6 * averaged_channel_bandwidth[0]))
    print("Processing %d time chunks in groups of %d" %
          (len(start_times), args.ngroup_visibility))
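The grouping of start_times near the top of this example is the standard slice-based chunking idiom; a self-contained illustration with made-up values:

start_times = list(range(0, 100, 10))  # ten start times, illustrative only
ngroup_visibility = 3
chunk_start_times = [start_times[i:i + ngroup_visibility]
                     for i in range(0, len(start_times), ngroup_visibility)]
# -> [[0, 10, 20], [30, 40, 50], [60, 70, 80], [90]]
print("Chunk start times", [c[0] for c in chunk_start_times])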