Example #1
def write_single_antenna_to_binary_file(input_file, dp_stand_id, polarization,
                                        output_file):
    """Extract a single dp_stand/pol to a npy file.

    Parameters
    ----------
    input_file : string
                raw LWA-SV file path
    dp_stand_id : int
                stand id from 1 to 256 inclusive
    polarization : int
                antenna polarization
    output_file : string
                filename to be saved/appended to
    """

    if not output_file.endswith(".singleAnt"):
        output_file = output_file + ".singleAnt"

    input_data = LWASVDataFile(input_file)

    with open(output_file, 'ab') as f:
        while input_data.get_remaining_frame_count() > 0:
            current_frame = input_data.read_frame()
            if current_frame.id == (dp_stand_id, polarization):
                float_arr = np.array(current_frame.data.iq).view(float)
                float_arr.tofile(f)
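
A minimal usage sketch (the capture path and output name are illustrative, not from the original code). Since the function dumps the payload's raw bytes, the file can be read back directly, assuming the TBN payload dtype is complex64.

# hypothetical paths, for illustration only
write_single_antenna_to_binary_file('058846_000123456.dat', 10, 0, 'stand10_pol0')
samples = np.fromfile('stand10_pol0.singleAnt', dtype=np.complex64)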
Example #2
def extract_single_ant_from_middle(input_file,
                                   dp_stand_id,
                                   polarization,
                                   max_length=-1,
                                   tstart=0):
    """Extract and combine all data from a single antenna into a numpy array.

    Parameters
    ----------
    input_file : string
                raw LWA-SV file path
    dp_stand_id : int
                stand id from 1 to 256 inclusive
    polarization : int
                antenna polarization
    max_length : int
                length in samples to extract
    tstart : int
                UTC timestamp (s since epoch)

    Returns
    -------
    numpy array
        array of size (avail frames, bandwidth)
    """

    input_data = LWASVDataFile(input_file)
    output_data = []

    total_frames = input_data.get_remaining_frame_count()
    num_ants = input_data.get_info()['nantenna']
    samps_per_frame = 512
    max_possible_length = math.ceil(total_frames / num_ants) * samps_per_frame

    if max_length < 0:
        max_length = max_possible_length

    print("-| {} frames in file".format(total_frames))
    print("-| {} antennas in file".format(num_ants))
    print("-| {} samples per frame".format(samps_per_frame))
    print("--| Extracting from stand {}, pol {}".format(
        dp_stand_id, polarization))
    print("--| Extracting {} of a possible {} samples".format(
        max_length, max_possible_length))

    while len(output_data) < max_length:
        try:
            current_frame = input_data.read_frame()
        except errors.EOFError:
            # stop early if the file runs out before max_length samples are read
            break
        current_tstamp = current_frame.time

        if current_tstamp >= tstart:
            if current_frame.id == (dp_stand_id, polarization):
                output_data.extend(current_frame.data.iq)

    output_data = np.array(output_data)

    return output_data
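
A usage sketch (the path and timestamp are illustrative):

# extract 100000 samples from stand 42, pol 0, starting at a chosen UTC time
iq = extract_single_ant_from_middle('058846_000123456.dat', 42, 0,
                                    max_length=100000, tstart=1600000000)
print(iq.shape, iq.dtype)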
Example #3
def count_frames(filename):
    """Prints out the number of frames for each antenna from a TBN file

    Parameters
    ----------
    filename : string
                name of file to be read (may end in dat, tbn, or nothing)
    """
    def __getKeysByValue__(myDict, valueToFind):
        return [key for key, value in myDict.items() if value == valueToFind]

    bigDict = {}

    idfN = LWASVDataFile(filename)
    total_num_frames = idfN.get_remaining_frame_count()

    while idfN.get_remaining_frame_count() > 0:
        current_frame = idfN.read_frame()
        key = str(current_frame.id)

        bigDict[key] = bigDict.get(key, 0) + 1

    # Make a list of unique frame counts
    unique_frame_counts = set(bigDict.values())

    # Create dict mapping num_frames -> number of antennas with that count
    # (keyed by frame count so equal antenna tallies cannot collide)
    framesAntsDict = {}

    for num_frames in unique_frame_counts:
        framesAntsDict[num_frames] = len(__getKeysByValue__(bigDict, num_frames))

    total_calculated_frames = 0

    print("STATS")
    print("-> Total number of frames in file: %s" % total_num_frames)
    for num_frames, num_ants in framesAntsDict.items():
        print("---> Number of antennas with %s frames: %s" % (num_frames, num_ants))
        total_calculated_frames += num_ants * num_frames
    print("SANITY CHECK")
    print("-> Frames")
    print("---> Sum of frames = {}".format(total_calculated_frames))
    print("-> Antennas")
    print("---> Sum of antennas = {}".format(sum(framesAntsDict.values())))
Example #4
def make_sample_tbn(filename, num_frames=2000000, offset=0):
    """Takes the defined number of frames and writes them to a new .tbn file

    Parameters
    ----------
    filename : string
                name of file to be read (may end in dat, tbn, or nothing)
    num_frames : int
                number of frames to be kept (default: 2000000)
    offset : float
                number of seconds to skip into file before reading (approximate)
    """

    in_name = os.path.realpath(filename).split('/')[-1]
    in_base_name = in_name.split('.')[0]

    out_name = in_base_name + '.tbn'

    print(f"Reading data from {filename}")
    print(f"Writing data to {out_name}")

    if os.path.exists(out_name):
        raise RuntimeError(
            f"Output file {out_name} already exists - not going to overwrite it"
        )

    in_tbn = LWASVDataFile(filename)
    out_fh = open(out_name, 'wb')

    if offset > 0:
        t = in_tbn.offset(offset)
        print(f"Requested offset: {offset} seconds")
        print(f"Achieved offset: {t} seconds")

    out_fh.write(in_tbn.fh.read(tbn.FRAME_SIZE * num_frames))
    out_fh.close()

    in_tbn.close()

    print("\n{} TBN Size: {} kB".format(out_name,
                                        os.path.getsize(out_name) / 1024.))

    # Check that the datatype is correct according to lsl
    out_tbn = LWASVDataFile(out_name)
    print("{} is of type: {} \n".format(out_name, type(out_tbn)))

    out_tbn.close()
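
A usage sketch (the input path is illustrative):

# keep the first 2 million frames starting roughly 5 s into the capture;
# this writes 058846_000123456.tbn in the working directory
make_sample_tbn('058846_000123456.dat', num_frames=2000000, offset=5)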
Example #5
def meta_to_txt(filename, outdir='./', station='lwasv'):
    """Pulls metadata from TBN file and puts it into a txt file of the same name

    Parameters
    ----------
    filename : string
                name of file to be read (may end in dat, tbn, or nothing)
    outdir : string
                name of output directory to save textfile in
    station : string
                one of 'lwasv' or 'lwa1' for now
    """

    simple_name = filename.split('/')[-1]
    simple_name = simple_name.split('.')[0]

    if not outdir.endswith('/'):
        outdir = outdir + '/'
    if not pathlib.Path(outdir).exists():
        # makedirs handles nested output paths that os.mkdir would reject
        os.makedirs(outdir)

    print("{} TBN Size: {} kB".format(filename,
                                      os.path.getsize(filename) / 1024))

    if station == 'lwasv':
        idfN = LWASVDataFile(filename)
        simple_name = simple_name + '-LWASV'
    elif station == 'lwa1':
        idfN = LWA1DataFile(filename)
        simple_name = simple_name + '-LWA1'
    else:
        raise NotImplementedError(
            "I haven't implemented that type of station yet")
    print("{} is of type: {}".format(filename, type(idfN)))

    # Poll the TBN file for its specifics
    with open(outdir + simple_name + ".txt", 'w') as meta:
        meta.write('TBN Metadata:\n')
        for key, value in idfN.get_info().items():
            meta.write("  %s: %s\n" % (str(key), str(value)))
    idfN.close()
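
A usage sketch (the path is illustrative):

# writes ./meta/058846_000123456-LWASV.txt containing the file's TBN metadata
meta_to_txt('058846_000123456.dat', outdir='./meta', station='lwasv')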
Example #6
def pull_meta(filename, key):
    """ Pulls out metadata from a TBN file given a key.

    Possible keys: 'size','nframe','frame_size', 'nantenna','sample_rate','data_bits','start_time','start_time_samples','freq1'

    Parameters
    ----------
    filename : string
                name of the file to pull the meta from
    key : string
                one of the possible keys listed above
    """
    idfN = LWASVDataFile(filename)
    if key == 'Human start time':
        tbnKey = 'start_time'
    else:
        tbnKey = key
    value = idfN.get_info()[tbnKey]
    if key == 'Human start time':
        return str(value.utc_datetime)
    else:
        return str(value)
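
A usage sketch (the path is illustrative):

print(pull_meta('058846_000123456.dat', 'sample_rate'))
print(pull_meta('058846_000123456.dat', 'Human start time'))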
Example #7
    # (fragment - the start of this example, which defines l_range, m_range,
    # u, v, vis, and cost, was not captured)
    print(m_range)
    for x, l in enumerate(l_range):
        for y, m in enumerate(m_range):
            cost[x, y] = ls_cost([l, m], u, v, vis)


    plt.contourf(l_range, m_range, cost)
    plt.colorbar()
    plt.xlabel("l")
    plt.ylabel("m")
    plt.show()

if __name__ == "__main__":
    plt.close('all')
    ants, n_baselines = select_antennas(station.antennas, use_pol=0)
    dfile = LWASVDataFile(tbn_filename)
    baselines, visibilities = compute_visibilities(dfile, ants, target_freq)
    dfile.close()

    azimuth = station.get_pointing_and_distance(transmitter_coords + [0])[0]

    bl1d = project_baselines(baselines, azimuth)
    phases = np.angle(visibilities[0])

    vis = visibilities[0]
    bl2d = np.array([[b[0].stand.x - b[1].stand.x, b[0].stand.y - b[1].stand.y]
                     for b in baselines])

    u = bl2d[:, 0]
    v = bl2d[:, 1]
Example #8
def TBN_to_freq_bin_matrix_indexed_by_dp_stand(filename,
                                               Fc,
                                               f1,
                                               fft_size=512,
                                               Fs=100000,
                                               polarization=0):
    """Reads each from of a TBN, takes an FFT, and puts a single bin of it into an index
        particular to it's DP stand number. It continues to append bin values as so each index
        is the full time-series of frequency bin values of that DP stand. It concats the vectors
        to be the length of the shortest so that the resulting matrix is rectangular.

        *LIMITATION* : It only does one polarization.

    Parameters
    ----------
    filename : string
                name of file to be read (may end in dat, tbn, or nothing)
    Fc : float
                center frequency in Hz
    f1 : float
                frequency of the signal to extract
    fft_size : int
                size of FFT window
    Fs : int
                sampling rate
    polarization : int
                which polarization to process, either 0 (default) or 1

    Returns
    -------
    numpy array
        array of size (num_dp_stands, samples_in_time_series)
    """

    bin_of_f1 = get_frequency_bin(fc=Fc, f1=f1, fft_size=fft_size)
    input_data = LWASVDataFile(filename)

    lwasv = stations.lwasv
    num_stands = len(lwasv.stands)
    num_ants = num_stands // 2

    # how many frames in total
    frame_count = input_data.get_remaining_frame_count()

    num_frames_per_ant = frame_count // num_ants

    # plus 1 to have space for a counter in column 0
    output_data = np.zeros((num_stands, num_frames_per_ant + 1),
                           dtype=np.complex64)

    current_frame = input_data.read_frame()

    while input_data.get_remaining_frame_count() > 0:
        (dp_stand_id, ant_polarization) = current_frame.id
        if ant_polarization == polarization:
            #NOT the same thing as the LWA stand number
            index = dp_stand_id - 1

            # Which cell to write to
            count = int(np.real(output_data[index, 0]) + 1)

            if count < num_frames_per_ant:

                fft = np.fft.fftshift(np.fft.fft(current_frame.data.iq))

                pt = fft[bin_of_f1]
                output_data[index, count] = pt

                # update counter
                output_data[index, 0] = count
        # Get frame for next iteration
        current_frame = input_data.read_frame()

    # Remove counter
    output_data = output_data[:, 1:]

    return output_data
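
A usage sketch (the path, center frequency, and signal frequency are illustrative):

Fc = 10.0e6        # TBN tuning frequency in Hz
f1 = Fc + 10.0e3   # signal of interest, 10 kHz above center
bin_series = TBN_to_freq_bin_matrix_indexed_by_dp_stand(
    '058846_000123456.dat', Fc, f1, fft_size=512, Fs=100000, polarization=0)
print(bin_series.shape)  # (num_dp_stands, samples_in_time_series)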
Example #9
def main(args):
    station = stations.lwasv

    tx_coords = known_transmitters.parse_args(args)

    print("Opening TBN file ({})".format(args.tbn_filename))
    with LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True) as tbnf:

        antennas = station.antennas

        valid_ants, n_baselines = select_antennas(antennas, args.use_pol)

        if not args.hdf5_file:
            raise RuntimeError('Please provide an output filename')
        else:
            with build_output_file(h5_fname=args.hdf5_file,
                                   tbnf=tbnf,
                                   valid_ants=valid_ants,
                                   n_baselines=n_baselines,
                                   tx_freq=args.tx_freq,
                                   fft_len=args.fft_len,
                                   use_pfb=args.use_pfb,
                                   use_pol=args.use_pol,
                                   integration_length=args.integration_length,
                                   transmitter_coords=tx_coords) as h5f:

                if args.point_finding_alg not in ('all', 'peak', 'CoM'):
                    raise NotImplementedError(
                        f"Unrecognized point finding algorithm: {args.point_finding_alg}"
                    )
                if args.point_finding_alg in ('all', 'peak'):
                    h5f.create_dataset_like('l_peak', h5f['l_est'])
                    h5f.create_dataset_like('m_peak', h5f['m_est'])
                    h5f.create_dataset_like('elevation_peak', h5f['elevation'])
                    h5f.create_dataset_like('azimuth_peak', h5f['azimuth'])
                if args.point_finding_alg in ('all', 'CoM'):
                    h5f.create_dataset_like('l_CoM', h5f['l_est'])
                    h5f.create_dataset_like('m_CoM', h5f['m_est'])
                    h5f.create_dataset_like('elevation_CoM', h5f['elevation'])
                    h5f.create_dataset_like('azimuth_CoM', h5f['azimuth'])
                del h5f['l_est']
                del h5f['m_est']
                del h5f['elevation']
                del h5f['azimuth']

                k = 0

                save_all_sky = (args.all_sky and k in args.all_sky) or (
                    args.all_sky_every and k % args.all_sky_every == 0
                )  # or (args.scatter_bad_fits and skip)

                if save_all_sky:
                    fig, ax = plt.subplots()

                for bl, freqs, vis in compute_visibilities_gen(
                        tbnf,
                        valid_ants,
                        integration_length=args.integration_length,
                        fft_length=args.fft_len,
                        use_pol=args.use_pol,
                        use_pfb=args.use_pfb):

                    gridded_image = grid_visibilities(bl, freqs, vis,
                                                      args.tx_freq, station)

                    save_all_sky = (args.all_sky and k in args.all_sky) or (
                        args.all_sky_every and k % args.all_sky_every == 0)

                    if args.point_finding_alg in ('all', 'peak'):
                        result = get_gimg_max(gridded_image,
                                              return_img=save_all_sky)
                        l = result[0]
                        m = result[1]
                        src_elev, src_az = lm_to_ea(l, m)
                        h5f['l_peak'][k] = l
                        h5f['m_peak'][k] = m
                        h5f['elevation_peak'][k] = src_elev
                        h5f['azimuth_peak'][k] = src_az

                    if args.point_finding_alg in ('all', 'CoM'):
                        result = get_gimg_center_of_mass(
                            gridded_image, return_img=save_all_sky)
                        l = result[0]
                        m = result[1]
                        src_elev, src_az = lm_to_ea(l, m)
                        h5f['l_CoM'][k] = l
                        h5f['m_CoM'][k] = m
                        h5f['elevation_CoM'][k] = src_elev
                        h5f['azimuth_CoM'][k] = src_az

                    if save_all_sky:
                        img = result[2]
                        extent = result[3]
                        ax.imshow(img,
                                  extent=extent,
                                  origin='lower',
                                  interpolation='nearest')
                        plt.savefig('allsky_int_{}.png'.format(k))

                    k += 1
                    print("\n\n")
                    if args.stop_after >= 0 and k >= args.stop_after:
                        break
Example #10
def generate_multiple_ants(input_file,
                           dp_stand_ids,
                           polarization,
                           chunk_length=2**20,
                           max_length=-1,
                           truncate=True):
    """Generate chunks of data from a list of antennas.

    Parameters
    ----------
    input_file : string
                raw LWA-SV file path
    dp_stand_ids : list
                list of stand ids from 1 to 256 inclusive
    polarization : list
                antenna polarization
    chunk_length : int
                length of each chunk to extract
    truncate : bool
                if the last chunk is shorter than chunk_length, return a short chunk
                otherwise, pad with zeros
    
    Returns
    -------
    numpy array
        array of size (avail frames, bandwidth)
    """
    input_data = LWASVDataFile(input_file)

    total_frames = input_data.get_remaining_frame_count()
    num_ants = input_data.get_info()['nantenna']
    samps_per_frame = 512
    max_possible_length = int(
        math.ceil(total_frames / num_ants) * samps_per_frame)

    if max_length < 0:
        max_length = max_possible_length

    print("-| {} frames in file".format(total_frames))
    print("-| {} antennas in file".format(num_ants))
    print("-| {} samples per frame".format(samps_per_frame))
    print("--| Extracting from stands {}, pol {}".format(
        dp_stand_ids, polarization))
    print("--| There are possibly {} samples for each stand".format(
        max_possible_length))
    print("--| Returning data in chunks of length {}".format(chunk_length))

    if chunk_length < samps_per_frame:
        raise ValueError(
            "--| Error: chunk size ({}) must be at least one frame ({} samples)"
            .format(chunk_length, samps_per_frame))

    # preallocate array to hold the current chunk of data. leave some space for overflow
    chunk_buffer = np.empty((len(dp_stand_ids), int(chunk_length * 2)),
                            dtype=np.complex64)

    done = False
    samples_sent = 0
    file_ended = False
    compensating_start_times = True
    dropped_frames = 0
    start_times = [0] * len(dp_stand_ids)
    fill_levels = [0] * len(dp_stand_ids)

    while not done:
        # fill the chunk buffer
        while any([l < chunk_length for l in fill_levels]):
            # read a frame
            try:
                current_frame = input_data.read_frame()
            except errors.EOFError:
                file_ended = True
                break

            current_id = current_frame.id
            for out_idx, stand in enumerate(dp_stand_ids):
                if (stand, polarization) == current_id:
                    # this is the right stand, add to the buffer
                    if compensating_start_times:
                        time = current_frame.time
                        if time >= max(start_times):
                            start_times[out_idx] = time
                            chunk_buffer[
                                out_idx][:
                                         samps_per_frame] = current_frame.data.iq
                            fill_levels[out_idx] = samps_per_frame
                        if start_times.count(start_times[0]) == len(
                                start_times) and start_times[0] > 0:
                            compensating_start_times = False
                            print("--| Start times match at time {:f}".format(
                                time))
                    else:
                        wr_idx = fill_levels[out_idx]
                        if wr_idx + samps_per_frame > chunk_buffer.shape[1]:
                            extend_by = max(int(0.2 * chunk_buffer.shape[1]),
                                            samps_per_frame)
                            # match the buffer dtype so concatenating doesn't
                            # silently upcast the chunk to complex128
                            extension = np.empty(
                                (chunk_buffer.shape[0], extend_by),
                                dtype=chunk_buffer.dtype)
                            print(
                                "--| Chunk buffer overflowed, increasing length from {} to {}"
                                .format(chunk_buffer.shape[1],
                                        chunk_buffer.shape[1] + extend_by))
                            chunk_buffer = np.concatenate(
                                (chunk_buffer, extension), axis=1)
                        chunk_buffer[
                            out_idx][wr_idx:wr_idx +
                                     samps_per_frame] = current_frame.data.iq
                        fill_levels[out_idx] += samps_per_frame
                        break

        if samples_sent + chunk_length >= max_length:
            # this is the last chunk
            print("--| Requested number of samples read")
            done = True
            last_chunk_len = max_length - samples_sent
            if truncate:
                yield chunk_buffer[:, :last_chunk_len]
            else:
                # zero-pad the tail out to a full chunk
                chunk_buffer[:, last_chunk_len:chunk_length] = 0
                yield chunk_buffer[:, :chunk_length]
        elif file_ended:
            # return unfinished chunk
            print("--| Reached end of file")
            min_fill = min(fill_levels)
            done = True
            if truncate:
                yield chunk_buffer[:, :min_fill]
            else:
                # zero-pad the unfilled tail out to a full chunk
                chunk_buffer[:, min_fill:chunk_length] = 0
                yield chunk_buffer[:, :chunk_length]
        else:
            # yield the chunk
            yield chunk_buffer[:, :chunk_length]
            samples_sent += chunk_length
            for i in range(len(chunk_buffer)):
                # check if there's more than a chunk of samples for any of the stands
                if fill_levels[i] > chunk_length:
                    # copy the extra samples to the start of the buffer
                    overflow_length = fill_levels[i] - chunk_length
                    chunk_buffer[i][0:overflow_length] = chunk_buffer[i][
                        chunk_length:fill_levels[i]]
                    # start the next read after the extra samples
                    fill_levels[i] = overflow_length
                else:
                    # otherwise we can overwrite the whole buffer
                    fill_levels[i] = 0
    return
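
A usage sketch of the generator (the path is illustrative):

for chunk in generate_multiple_ants('058846_000123456.dat', [10, 20, 30],
                                    polarization=0, chunk_length=2**20):
    # chunk has shape (3, chunk_length), except possibly the final chunk
    print(chunk.shape)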
Example #11
def extract_single_ant(input_file,
                       dp_stand_id,
                       polarization,
                       max_length=-1,
                       file_is_lwasvdatafile=False,
                       fill_missing=False):
    """Extract and combine all data from a single antenna into a numpy array.

    Parameters
    ----------
    input_file : string
                raw LWA-SV file path, or an open LWASVDataFile if
                file_is_lwasvdatafile is True
    dp_stand_id : int
                stand id from 1 to 256 inclusive
    polarization : int
                antenna polarization
    max_length : int
                length in samples to extract
    file_is_lwasvdatafile : bool
                if True, input_file is an already-open LWASVDataFile
    fill_missing : bool
                if True, fill gaps left by dropped frames with zeros

    Returns
    -------
    numpy array
        array of size (avail frames, bandwidth)
    """

    if file_is_lwasvdatafile:
        input_data = input_file
    else:
        input_data = LWASVDataFile(input_file)
    output_data = []

    total_frames = input_data.get_remaining_frame_count()
    num_ants = input_data.get_info()['nantenna']
    samps_per_frame = 512
    max_possible_length = math.ceil(total_frames / num_ants) * samps_per_frame

    expected_timetag_delta = int(
        samps_per_frame / input_data.get_info()['sample_rate'] * reference_fs)
    prev_timetag = None

    if max_length < 0:
        max_length = max_possible_length

    print("-| {} frames in file".format(total_frames))
    print("-| {} antennas in file".format(num_ants))
    print("-| {} samples per frame".format(samps_per_frame))
    print("--| Extracting from stand {}, pol {}".format(
        dp_stand_id, polarization))
    print("--| Extracting {} of a possible {} samples".format(
        max_length, max_possible_length))
    print("--| Expecting a timetag skip of {} between frames".format(
        expected_timetag_delta))

    # while input_data.get_remaining_frame_count() > 0:
    while len(output_data) < max_length:
        try:
            current_frame = input_data.read_frame()
        except errors.EOFError:
            break

        if current_frame.id != (dp_stand_id, polarization):
            continue

        if prev_timetag is not None:
            if current_frame.payload.timetag != prev_timetag + expected_timetag_delta:
                warnings.warn(
                    f"WARNING: invalid timetag skip in sample {len(output_data) + 1}"
                )
                print(
                    f"Expected {expected_timetag_delta + prev_timetag} but got {current_frame.payload.timetag}"
                )
                print(
                    f"This is a difference of {current_frame.payload.timetag - prev_timetag} as opposed to the expected difference of {expected_timetag_delta}"
                )
                if fill_missing:
                    timetag_diff = current_frame.payload.timetag - prev_timetag
                    # a gap of N * expected_timetag_delta means N - 1 whole
                    # frames were dropped (the current frame accounts for one)
                    frames_dropped = int(timetag_diff / expected_timetag_delta) - 1
                    print(
                        f"Filling {frames_dropped} missing frames with zeros ({samps_per_frame * frames_dropped} samples)"
                    )
                    print(len(output_data))
                    output_data.extend([0.0] * (samps_per_frame * frames_dropped))
                    print(len(output_data))

        output_data.extend(current_frame.payload.data)

        prev_timetag = current_frame.payload.timetag

    output_data = np.array(output_data)

    return output_data
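
A usage sketch (the path is illustrative):

# zero-fill any gaps left by dropped frames while extracting stand 42, pol 0
iq = extract_single_ant('058846_000123456.dat', 42, 0, max_length=2**20,
                        fill_missing=True)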
Example #12
def extract_multiple_ants(input_file,
                          dp_stand_ids,
                          polarization,
                          max_length=-1,
                          truncate=True):
    """Extract and combine all data from a list of antenna into an array of numpy arrays.

    Parameters
    ----------
    input_file : string
                raw LWA-SV file path
    dp_stand_ids : list
                list of stand ids from 1 to 256 inclusive
    polarization : int
                antenna polarization
    max_length : int
                length in samples to extract
    truncate : boolean
                discard later frames so all antennas have the same number
    Returns
    -------
    numpy array
        array of size (len(dp_stand_ids), avail frames)
    """

    input_data = LWASVDataFile(input_file)

    total_frames = input_data.get_remaining_frame_count()
    num_ants = input_data.get_info()['nantenna']
    samps_per_frame = 512
    max_possible_length = int(
        math.ceil(total_frames / num_ants) * samps_per_frame)

    if max_length < 0:
        max_length = max_possible_length

    print("-| {} frames in file".format(total_frames))
    print("-| {} antennas in file".format(num_ants))
    print("-| {} samples per frame".format(samps_per_frame))
    print("--| Extracting from stands {}, pol {}".format(
        dp_stand_ids, polarization))
    print(
        "--| Attempting to extract {} of a possible {} samples for each stand".
        format(max_length, max_possible_length))

    # preallocate data array a little bigger than we think the longest signal will be
    output_data = np.zeros((len(dp_stand_ids), int(max_length * 1.2) + 1),
                           dtype=np.complex64)

    fill_levels = [0] * len(dp_stand_ids)

    # while input_data.get_remaining_frame_count() > 0:
    while any([l < max_length for l in fill_levels]):
        try:
            current_frame = input_data.read_frame()
        except errors.EOFError:
            print("--| EOF reached before maximum length.")
            break

        current_id = current_frame.id

        # check if this frame is from one of the stands we want
        for out_index, s in enumerate(dp_stand_ids):
            if (s, polarization) == current_id:
                if fill_levels[out_index] < max_length:
                    wr_idx = fill_levels[out_index]
                    output_data[
                        out_index][wr_idx:wr_idx +
                                   samps_per_frame] = current_frame.data.iq
                    fill_levels[out_index] += samps_per_frame
                break

    min_fill = min(fill_levels)

    # if the lengths are unequal then truncate long ones
    if truncate and fill_levels.count(min_fill) != len(fill_levels):
        print("--| Truncating lengths from {} to {}".format(
            fill_levels, min_fill))
        return output_data[:, :min_fill]
    else:
        return output_data[:, :max_length]
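
A usage sketch (the path is illustrative):

# rows follow the order of dp_stand_ids
data = extract_multiple_ants('058846_000123456.dat', [10, 20, 30],
                             polarization=0, max_length=2**20)
print(data.shape)  # (3, max_length), or shorter if the file ran out early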
Example #13
def main(args):

    # saz and sel are used later
    img = aipy.img.ImgW(size=50, res=0.5)
    top = img.get_top(center=(50, 50))
    saz, sel = aipy.coord.top2azalt(top)

    station = stations.lwasv

    tx_coords = known_transmitters.parse_args(args)

    print("Opening TBN file ({})".format(args.tbn_filename))
    with LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True) as tbnf:

        antennas = station.antennas

        valid_ants, n_baselines = select_antennas(antennas, args.use_pol)

        if not args.hdf5_file:
            raise RuntimeError('Please provide an output filename')
        else:
            with build_output_file(
                    h5_fname=args.hdf5_file,
                    tbnf=tbnf,
                    valid_ants=valid_ants,
                    n_baselines=n_baselines,
                    # use_pfb=args.use_pfb, use_pol=args.use_pol,
                    integration_length=args.integration_length,
                    transmitter_coords=tx_coords) as h5f:

                del h5f['l_est']
                del h5f['m_est']

                freq = tbnf.get_info('freq1')
                idx = [ant.digitizer - 1 for ant in valid_ants]
                xyz = np.array([[ant.stand.x, ant.stand.y, ant.stand.z]
                                for ant in valid_ants])
                delays = np.array(
                    [ant.cable.delay(freq) for ant in valid_ants])
                delays -= delays.min()

                n_samples = tbnf.get_info('nframe') / tbnf.get_info('nantenna')
                samples_per_integration = int(args.integration_length *
                                              tbnf.get_info('sample_rate') /
                                              512)
                n_integrations = int(
                    np.floor(n_samples / samples_per_integration))

                for int_num in range(n_integrations):
                    print(
                        f"Starting iteration {int_num + 1} of {n_integrations}"
                    )
                    # Load in the data and select what we need
                    tInt, t0, data = tbnf.read(args.integration_length)

                    data = data[idx, :]

                    # Apply a phase rotation to deal with the cable delays
                    for i in range(data.shape[0]):
                        data[i, :] *= np.exp(2j * np.pi * freq * delays[i])
                    data /= (np.abs(data)).max()

                    # Calculate Rx - the time-averaged autocorrelation matrix.
                    # Re-initialize it every integration so samples from the
                    # previous integration don't accumulate into this one.
                    nSamp = data.shape[1]
                    xOutput = []
                    Rx = np.matrix(
                        np.zeros((data.shape[0], data.shape[0]),
                                 dtype=np.complex128))
                    print("Computing time-averaged autocorrelation matrix")
                    for i in range(nSamp):
                        x = np.matrix(data[:, i]).T
                        xOutput.append(x)
                        Rx += x * x.H
                    Rx /= nSamp

                    # Find the eigenvectors/values for Rx and order them by significance
                    print("Computing eigenvectors/values of the ACM")
                    w, v = np.linalg.eig(Rx)
                    order = np.argsort(np.abs(w))[::-1]
                    w = w[order]
                    v = v[:, order]

                    # Break the eigenvalues into a signal sub-space, Us, and a noise sub-
                    # space, Un.  This is currently done based on the number of sources
                    # we have rather than inferred from the eigenvalues.
                    ##Us = numpy.where( numpy.abs(w) > sigma )[0] #TODO I think this part should help find frequency too but Jayce had it commented out because the sigma section wasn't working (see her tbnMusic.py script I think)
                    ##Un = numpy.where( numpy.abs(w) <= sigma )[0]
                    # Us = range(3) #TODO What Jayce had. I imagine she had 4 sources
                    # Un = range(3, w.size)
                    Us = range(1)
                    Un = range(1, w.size)

                    print("Evaluating MUSIC spectrum")
                    P = np.zeros_like(saz)
                    E = np.zeros_like(saz)
                    for i in range(saz.shape[0]):
                        print(
                            f"Starting row {i+1} / {saz.shape[0]} for integration {int_num}"
                        )
                        for j in range(saz.shape[1]):
                            ta = saz[i, j]
                            te = sel[i, j]
                            if not np.isfinite(ta) or not np.isfinite(te):
                                continue

                            pv = np.array([
                                np.cos(te) * np.sin(ta),
                                np.cos(te) * np.cos(ta),
                                np.sin(te)
                            ])

                            a = np.zeros((len(valid_ants), 1),
                                         dtype=np.complex128)
                            for k in range(len(valid_ants)):
                                a[k, 0] = np.exp(
                                    2j * np.pi * freq *
                                    np.dot(xyz[k, :] - xyz[0, :], pv) /
                                    speedOfLight)
                            a = np.matrix(a)

                            v2 = np.matrix(v[:, Un])
                            o = a.H * v2 * v2.H * a
                            P[i, j] = 1.0 / max([1e-9, o[0, 0].real])

                    spectrum_max_idx = np.where(P == P.max())
                    el_max = sel[spectrum_max_idx][0]
                    az_max = saz[spectrum_max_idx][0]
                    h5f['elevation'][int_num] = el_max
                    h5f['azimuth'][int_num] = az_max
                    print(
                        f"Integration complete - az = {az_max:.2f} el = {el_max:.2f}"
                    )
Example #14
def main(args):
    # this first part of the code is run by all processes

    # set up MPI environment
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if size < 2:
        raise RuntimeError(
            f"This program requires at least two MPI processes to function. Please rerun with more resources"
        )

    # designate the last process as the supervisor/file reader
    supervisor = size - 1

    # open the TBN file for reading
    tbnf = LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True)

    # figure out the details of the run we want to do
    tx_coords = known_transmitters.parse_args(args)
    antennas = station.antennas
    valid_ants, n_baselines = select_antennas(antennas, args.use_pol)
    n_ants = len(valid_ants)

    sample_rate = tbnf.get_info('sample_rate')
    # some of our TBNs claim to have frame size 1024 but they are lying
    frame_size = 512
    tbn_center_freq = tbnf.get_info('freq1')

    total_integrations, _ = compute_integration_numbers(
        tbnf, args.integration_length)

    # open the output HDF5 file and create datasets
    # because of the way parallelism in h5py works all processes (even ones
    # that don't write to the file) must do this
    h5f = build_output_file(args.hdf5_file,
                            tbnf,
                            valid_ants,
                            n_baselines,
                            args.integration_length,
                            tx_freq=args.tx_freq,
                            fft_len=args.fft_len,
                            use_pfb=args.use_pfb,
                            use_pol=args.use_pol,
                            opt_method=opt_method,
                            vis_model='gaussian',
                            transmitter_coords=tx_coords,
                            mpi_comm=comm)

    if rank == supervisor:
        # the supervisor process runs this code
        print("supervisor: started")

        # state info
        reached_end = False
        workers_alive = [True for _ in range(size - 1)]
        int_no = 0

        while True:
            if not reached_end:
                # grab data for the next available worker
                try:
                    duration, start_time, data = tbnf.read(
                        args.integration_length)
                    # only use data from valid antennas
                    data = data[[a.digitizer - 1 for a in valid_ants], :]
                except EOFError:
                    reached_end = True
                    print(f"supervisor: reached EOF")

                if int_no >= total_integrations:
                    print(f"supervisor: this is the last integration")
                    reached_end = True

            # get the next "ready" message from the workers
            st = MPI.Status()
            msg = comm.recv(status=st)
            if msg == "ready":
                print(
                    f"supervisor: received 'ready' message from worker {st.source}"
                )

                # if we're done, send an exit message and mark that we've killed this worker
                # an empty array indicates that the worker should exit
                if reached_end:
                    print(
                        f"supervisor: sending exit message to worker {st.source}"
                    )
                    comm.Send(np.array([]), dest=st.source, tag=int_no)
                    workers_alive[st.source] = False

                    if not any(workers_alive):
                        print(f"supervisor: all workers told to exit, goodbye")
                        break
                # otherwise, send the data to the worker for processing
                else:
                    print(
                        f"supervisor: sending data for integration {int_no}/{total_integrations} to worker {st.source}"
                    )
                    # Send with a capital S is optimized to send numpy arrays
                    comm.Send(data, dest=st.source, tag=int_no)
                    int_no += 1
            else:
                raise ValueError(
                    f"Supervisor received unrecognized message '{msg}' from worker {st.source}"
                )

        tbnf.close()

    else:
        # the worker processes run this code
        print(f"worker {rank} started")

        # workers don't need access to the TBN file
        tbnf.close()

        # figure out the size of the incoming data buffer
        samples_per_integration = int(
            round(args.integration_length * sample_rate /
                  frame_size)) * frame_size
        buffer_shape = (n_ants, samples_per_integration)

        while True:
            # send with a lowercase s can send any pickle-able python object
            # this is a synchronous send - it will block until the message is read by the supervisor
            # the other sends (e.g. comm.Send) only block until the message is safely taken by MPI, which might happen before the receiver actually reads it
            comm.ssend("ready", dest=supervisor)

            # build a buffer to be filled with data
            data = np.empty(buffer_shape, np.complex64)

            # receive the data from the supervisor
            st = MPI.Status()
            comm.Recv(data, source=supervisor, status=st)

            int_no = st.tag

            # if the buffer is empty, we're done
            if st.count == 0:
                print(f"worker {rank}: received exit message, exiting")
                break

            # otherwise process the data we've received
            print(
                f"worker {rank}: received data for integration {int_no}, starting processing"
            )

            # run the correlator
            bl, freqs, vis = fxc.FXMaster(
                data,
                valid_ants,
                LFFT=args.fft_len,
                pfb=args.use_pfb,
                sample_rate=sample_rate,
                central_freq=tbn_center_freq,
                Pol='xx' if args.use_pol == 0 else 'yy',
                return_baselines=True,
                gain_correct=True)

            # extract the frequency bin we want
            target_bin = np.argmin([abs(args.tx_freq - f) for f in freqs])
            vis_tbin = vis[:, target_bin]

            # baselines in wavelengths
            uvw = uvw_from_antenna_pairs(bl, wavelength=3e8 / args.tx_freq)

            # model fitting
            l_out, m_out, opt_result = fit_model_to_vis(uvw,
                                                        vis_tbin,
                                                        residual_function,
                                                        l_init,
                                                        m_init,
                                                        verbose=False)

            # convert direction cosines to sky coords
            src_elev, src_az = lm_to_ea(l_out, m_out)

            # write data to h5 file
            h5f['l_start'][int_no] = l_init
            h5f['m_start'][int_no] = m_init
            h5f['l_est'][int_no] = l_out
            h5f['m_est'][int_no] = m_out
            h5f['elevation'][int_no] = src_elev
            h5f['azimuth'][int_no] = src_az
            h5f['cost'][int_no] = opt_result['cost']
            h5f['nfev'][int_no] = opt_result['nfev']

            # compute the bin power and save it to the file
            # arbitrarily picking the tenth antenna in this list
            power_calc_data = data[10, :]
            h5f['snr_est'][int_no] = estimate_snr(power_calc_data,
                                                  args.fft_len, args.tx_freq,
                                                  sample_rate, tbn_center_freq)

            print(f"worker {rank}: done processing integration {int_no}")

    # back to common code for both supervisor and workers

    h5f.attrs['total_integrations'] = int_no
    h5f.close()
Example #15
def main(args):
    # this first part of the code is run by all processes

    # set up MPI environment
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if size < 2:
        raise RuntimeError(
            f"This program requires at least two MPI processes to function. Please rerun with more resources"
        )

    # designate the last process as the supervisor/file reader
    supervisor = size - 1

    # open the TBN file for reading
    tbnf = LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True)

    # figure out the details of the run we want to do
    tx_coords = known_transmitters.parse_args(args)
    antennas = station.antennas
    valid_ants, n_baselines = select_antennas(antennas, args.use_pol)
    n_ants = len(valid_ants)
    total_integrations, _ = compute_integration_numbers(
        tbnf, args.integration_length)

    sample_rate = tbnf.get_info('sample_rate')
    # some of our TBNs claim to have frame size 1024 but they are lying
    frame_size = 512
    tbn_center_freq = tbnf.get_info('freq1')

    # open the output HDF5 file and create datasets
    # because of the way parallelism in h5py works all processes (even ones
    # that don't write to the file) must do this
    h5f = build_output_file(args.hdf5_file,
                            tbnf,
                            valid_ants,
                            n_baselines,
                            args.integration_length,
                            tx_freq=args.tx_freq,
                            fft_len=args.fft_len,
                            use_pfb=args.use_pfb,
                            use_pol=args.use_pol,
                            transmitter_coords=tx_coords,
                            mpi_comm=comm)

    if args.point_finding_alg not in ('all', 'peak', 'CoM'):
        raise NotImplementedError(
            f"Unrecognized point finding algorithm: {args.point_finding_alg}")
    if args.point_finding_alg in ('all', 'peak'):
        h5f.create_dataset_like('l_peak', h5f['l_est'])
        h5f.create_dataset_like('m_peak', h5f['m_est'])
        h5f.create_dataset_like('elevation_peak', h5f['elevation'])
        h5f.create_dataset_like('azimuth_peak', h5f['azimuth'])
    if args.point_finding_alg in ('all', 'CoM'):
        h5f.create_dataset_like('l_CoM', h5f['l_est'])
        h5f.create_dataset_like('m_CoM', h5f['m_est'])
        h5f.create_dataset_like('elevation_CoM', h5f['elevation'])
        h5f.create_dataset_like('azimuth_CoM', h5f['azimuth'])
    del h5f['l_est']
    del h5f['m_est']
    del h5f['elevation']
    del h5f['azimuth']

    if rank == supervisor:
        # the supervisor process runs this code
        print("supervisor: started")

        # state info
        reached_end = False
        workers_alive = [True for _ in range(size - 1)]
        int_no = 0

        while True:
            if not reached_end:
                # grab data for the next available worker
                try:
                    duration, start_time, data = tbnf.read(
                        args.integration_length)
                    # only use data from valid antennas
                    data = data[[a.digitizer - 1 for a in valid_ants], :]
                except EOFError:
                    reached_end = True
                    print(f"supervisor: reached EOF")
                if int_no >= total_integrations:
                    print(f"supervisor: this is the last integration")
                    reached_end = True

            # get the next "ready" message from the workers
            st = MPI.Status()
            msg = comm.recv(status=st)
            if msg == "ready":
                print(
                    f"supervisor: received 'ready' message from worker {st.source}"
                )

                # if we're done, send an exit message and mark that we've killed this worker
                # an empty array indicates that the worker should exit
                if reached_end:
                    print(
                        f"supervisor: sending exit message to worker {st.source}"
                    )
                    comm.Send(np.array([]), dest=st.source, tag=int_no)
                    workers_alive[st.source] = False

                    if not any(workers_alive):
                        print(f"supervisor: all workers told to exit, goodbye")
                        break
                # otherwise, send the data to the worker for processing
                else:
                    print(
                        f"supervisor: sending data for integration {int_no}/{total_integrations} to worker {st.source}"
                    )
                    # Send with a capital S is optimized to send numpy arrays
                    comm.Send(data, dest=st.source, tag=int_no)
                    int_no += 1
            else:
                raise ValueError(
                    f"Supervisor received unrecognized message '{msg}' from worker {st.source}"
                )

        tbnf.close()

    else:
        # the worker processes run this code
        print(f"worker {rank} started")

        # workers don't need access to the TBN file
        tbnf.close()

        # figure out the size of the incoming data buffer
        samples_per_integration = int(
            round(args.integration_length * sample_rate /
                  frame_size)) * frame_size
        buffer_shape = (n_ants, samples_per_integration)

        while True:
            # send with a lowercase s can send any pickle-able python object
            # this is a synchronous send - it will block until the message is read by the supervisor
            # the other sends (e.g. comm.Send) only block until the message is safely taken by MPI, which might happen before the receiver actually reads it
            comm.ssend("ready", dest=supervisor)

            # build a buffer to be filled with data
            data = np.empty(buffer_shape, np.complex64)

            # receive the data from the supervisor
            st = MPI.Status()
            comm.Recv(data, source=supervisor, status=st)

            int_no = st.tag

            # if the buffer is empty, we're done
            if st.count == 0:
                print(f"worker {rank}: received exit message, exiting")
                break

            # otherwise process the data we've received
            print(
                f"worker {rank}: received data for integration {int_no}, starting processing"
            )

            # run the correlator
            bl, freqs, vis = fxc.FXMaster(
                data,
                valid_ants,
                LFFT=args.fft_len,
                pfb=args.use_pfb,
                sample_rate=sample_rate,
                central_freq=tbn_center_freq,
                Pol='xx' if args.use_pol == 0 else 'yy',
                return_baselines=True,
                gain_correct=True)

            gridded_image = grid_visibilities(bl, freqs, vis, args.tx_freq,
                                              station)

            save_all_sky = (args.all_sky and int_no in args.all_sky) or (
                args.all_sky_every and int_no % args.all_sky_every == 0)

            if args.point_finding_alg in ('all', 'peak'):
                result = get_gimg_max(gridded_image, return_img=save_all_sky)
                l = result[0]
                m = result[1]
                src_elev, src_az = lm_to_ea(l, m)
                h5f['l_peak'][int_no] = l
                h5f['m_peak'][int_no] = m
                h5f['elevation_peak'][int_no] = src_elev
                h5f['azimuth_peak'][int_no] = src_az

            if args.point_finding_alg in ('all', 'CoM'):
                result = get_gimg_center_of_mass(gridded_image,
                                                 return_img=save_all_sky)
                l = result[0]
                m = result[1]
                src_elev, src_az = lm_to_ea(l, m)
                h5f['l_CoM'][int_no] = l
                h5f['m_CoM'][int_no] = m
                h5f['elevation_CoM'][int_no] = src_elev
                h5f['azimuth_CoM'][int_no] = src_az

            if save_all_sky:
                img = result[2]
                extent = result[3]
                fig, ax = plt.subplots()
                ax.imshow(img,
                          extent=extent,
                          origin='lower',
                          interpolation='nearest')
                plt.savefig('allsky_int_{}.png'.format(int_no))

            # compute the bin power and save it to the file
            # arbitrarily picking the tenth antenna in this list
            power_calc_data = data[10, :]
            h5f['snr_est'][int_no] = estimate_snr(power_calc_data,
                                                  args.fft_len, args.tx_freq,
                                                  sample_rate, tbn_center_freq)

            print(f"worker {rank}: done processing integration {int_no}")

    # back to common code for both supervisor and workers
    h5f.attrs['total_integrations'] = int_no
    h5f.close()
Example #16
def main(args):

    start = time.time()

    print("\nCreating filenames and checking input file extension")
    input_file = args.filename


    ext = os.path.splitext(input_file)[-1].lower()
    if ext not in ['', '.tbn']:
        raise Exception("Extension should be .tbn or not exist")
    else:
        input_filename = os.path.split(os.path.splitext(input_file)[-2])[-1]
        output_file = input_filename + '.hdf5'
        print("-| Input file extension is {} (full name: {})".format(ext, input_file))
        print("-| Output file extension is '.hdf5 (full name: {})".format(output_file))

    print("\nChecking input data")
    input_data = LWASVDataFile(input_file)

    # For getting output array size
    lwasv = stations.lwasv
    num_stands = len(lwasv.stands)
    num_ants = num_stands // 2
    min_frames = get_min_frame_count(input_file)
    print("-| Minimum number of frames is: {}".format(min_frames))
    print("-| Number of antennas per polarization: {}".format(num_ants))

    # Annoying to do this here
    current_frame = input_data.read_frame()
    iq_size = len(current_frame.data.iq)

    # Shape is the datasize plus 1 for a counter at each element
    # output_shape_with_counter = (num_ants, min_frames * iq_size + 1)
    output_shape = (num_ants, min_frames * iq_size)
    pol0_counters = np.zeros(num_ants, dtype=int)
    pol1_counters = np.zeros(num_ants, dtype=int)
    print("-| Shape of each output dataset will be {}".format(output_shape))

    print("\nCreating and opening output file")
    with h5py.File(output_file, "w") as f:
        
        # Create a group to store everything in, and to attach attributes to
        print("-| Creating parent group {}[{}]".format(output_file, input_filename))
        parent = f.create_group(input_filename)

        # Add attributes to group
        print("-| Adding TBN metadata as attributes to the parent group")
        for key, value in input_data.get_info().items():
            if key is "start_time":
                parent.attrs["Human start time"] = str(value.utc_datetime)
            parent.attrs[key] = value
            print("--| key: {}  | value: {}".format(key, value))
        
        # Create a subdataset for each polarization
        print("-| Creating datasets full of zeros")
        pol0 = parent.create_dataset("pol0", output_shape, dtype=np.complex64)#, compression='lzf')
        pol1 = parent.create_dataset("pol1", output_shape, dtype=np.complex64)#, compression='lzf')
        
        # For progress bar
        totalFrames = input_data.get_remaining_frame_count()
        current_iteration = 0

        print("-| Beginning to build output from input")
        while input_data.get_remaining_frame_count() > 0:
            current_iteration += 1
            printProgressBar(current_iteration, totalFrames)
            (frame_dp_stand_id, frame_ant_polarization) = current_frame.id

            frameData = current_frame.data.iq

            x_index = frame_dp_stand_id - 1

            if frame_ant_polarization == 0:
                counter = pol0_counters
                dset = pol0
            elif frame_ant_polarization == 1:
                counter = pol1_counters
                dset = pol1

            y_index = counter[x_index]
            
            if not isFrameLimited(y_index, len(frameData), min_frames):
                data_start = y_index
                data_end = data_start+len(frameData)

                dset[x_index, data_start:data_end] = frameData

                counter[x_index] = data_end


            # Get frame for next iteration
            current_frame = input_data.read_frame()

    print("\nDONE")

    end = time.time()
    totalTime = end-start
    print("\nThis script ran for {}s = {}min = {}h".format(totalTime, totalTime/60, totalTime/3600))
Example #17
0
def main(args):
    # Parse command line options
    filename = args.filename

    # Setup the LWA station information
    if args.metadata is not None:
        try:
            station = stations.parse_ssmif(args.metadata)
        except ValueError:
            station = metabundleADP.get_station(args.metadata, apply_sdm=True)
    else:
        station = stations.lwasv
    antennas = station.antennas

    idf = LWASVDataFile(filename)
    if not isinstance(idf, TBFFile):
        raise RuntimeError("File '%s' does not appear to be a valid TBF file" %
                           os.path.basename(filename))

    jd = idf.get_info('start_time').jd
    date = idf.get_info('start_time').datetime
    nFpO = idf.get_info('nchan') // 12
    sample_rate = idf.get_info('sample_rate')
    nInts = idf.get_info('nframe') // nFpO

    # Get valid stands for both polarizations
    goodX = []
    goodY = []
    for ant in antennas:
        if ant.combined_status == 33 or args.all:
            if ant.pol == 0:
                goodX.append(ant)
            else:
                goodY.append(ant)

    # Now combine both lists to come up with stands that
    # are in both so we can form the cross-polarization
    # products if we need to
    good = []
    for antX in goodX:
        for antY in goodY:
            if antX.stand.id == antY.stand.id:
                good.append(antX.digitizer - 1)
                good.append(antY.digitizer - 1)

    # Report on the valid stands found.  This is a little verbose,
    # but nice to see.
    print("Found %i good stands to use" % (len(good) // 2, ))
    for i in good:
        print("%3i, %i" % (antennas[i].stand.id, antennas[i].pol))

    # Number of frames to read in at once and average
    nFrames = min([int(args.avg_time * sample_rate), nInts])
    args.offset = idf.offset(args.offset)
    nSets = idf.get_info('nframe') // nFpO // nFrames
    nSets = nSets - int(args.offset * sample_rate) // nFrames

    central_freq = idf.get_info('freq1')
    central_freq = central_freq[len(central_freq) // 2]

    print("Data type:  %s" % type(idf))
    print("Samples per observations: %i" % nFpO)
    print("Sampling rate: %i Hz" % sample_rate)
    print("Tuning frequency: %.3f Hz" % central_freq)
    print("Captures in file: %i (%.3f s)" % (nInts, nInts / sample_rate))
    print("==")
    print("Station: %s" % station.name)
    print("Date observed: %s" % date)
    print("Julian day: %.5f" % jd)
    print("Offset: %.3f s (%i frames)" %
          (args.offset, args.offset * sample_rate))
    print("Integration Time: %.3f s" % (nFrames / sample_rate))
    print("Number of integrations in file: %i" % nSets)

    # Make sure we don't try to do too many sets
    if args.samples > nSets:
        args.samples = nSets

    # Loop over chunks of 100 integrations to make sure that we don't overflow
    # the FITS IDI memory buffer
    s = 0
    leftToDo = args.samples
    basename = os.path.split(filename)[1]
    basename, ext = os.path.splitext(basename)
    while leftToDo > 0:
        fitsFilename = "%s.FITS_%i" % (
            basename,
            (s + 1),
        )

        if leftToDo > 100:
            chunk = 100
        else:
            chunk = leftToDo

        process_chunk(idf,
                      station,
                      good,
                      fitsFilename,
                      int_time=args.avg_time,
                      pols=args.products,
                      chunk_size=chunk)

        s += 1
        leftToDo = leftToDo - chunk

    idf.close()
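A hedged sketch of the command-line interface this main appears to expect, reconstructed from the args.* attributes it reads; the flag names and defaults here are assumptions, not the script's actual parser:

import argparse

parser = argparse.ArgumentParser(
    description="Correlate a TBF file into FITS IDI chunks")
parser.add_argument("filename", help="raw TBF file to correlate")
parser.add_argument("--metadata", default=None,
                    help="SSMIF or metadata tarball for the station")
parser.add_argument("--all", action="store_true",
                    help="use all stands regardless of combined_status")
parser.add_argument("--avg-time", type=float, default=1.0,
                    help="integration time in seconds")
parser.add_argument("--offset", type=float, default=0.0,
                    help="offset into the file in seconds")
parser.add_argument("--samples", type=int, default=10,
                    help="number of integrations to write out")
parser.add_argument("--products", nargs="*", default=["xx", "yy"],
                    help="polarization products to form")

if __name__ == "__main__":
    main(parser.parse_args())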
Example #18
0
def main(args):

    print("Opening TBN file ({})".format(args.tbn_filename))
    with LWASVDataFile(args.tbn_filename, ignore_timetag_errors=True) as tbnf:

        # `station` is never defined in this example; assume LWA-SV to match
        # the LWASVDataFile reader (requires `from lsl.common import stations`)
        station = stations.lwasv
        antennas = station.antennas

        valid_ants, n_baselines = select_antennas(antennas, args.use_pol)

        tx_coords = known_transmitters.parse_args(args)

        if args.visibility_model == 'point':
            residual_function = point_residual_abs
            residual_function_chain = None
        elif args.visibility_model == 'gaussian':
            residual_function = bind_gaussian_residual(1)
            residual_function_chain = None
        elif args.visibility_model == 'chained':
            residual_function = bind_gaussian_residual(0.5)
            residual_function_chain = point_residual_abs
        else:
            raise RuntimeError("Unknown visibility model option: {args.visibility_model}")

        if not args.hdf5_file:
            raise RuntimeError('Please provide an output filename')
        else:
            with build_output_file(args.hdf5_file, tbnf, valid_ants,
                    n_baselines, args.integration_length, tx_freq=args.tx_freq,
                    fft_len=args.fft_len, use_pfb=args.use_pfb,
                    use_pol=args.use_pol, opt_method=opt_method,
                    vis_model=args.visibility_model,
                    transmitter_coords=tx_coords) as h5f:

                # arrays for estimated parameters from each integration
                l_est = np.array([args.l_guess])
                m_est = np.array([args.m_guess])

                k = 0
                for bl, freqs, vis in compute_visibilities_gen(tbnf, valid_ants, integration_length=args.integration_length, fft_length=args.fft_len, use_pol=args.use_pol, use_pfb=args.use_pfb):

                    # start the optimization at the mean of the `param_guess_av_length` most recent fits
                    if args.visibility_model == 'point':
                        l_init = l_est[-param_guess_av_length:].mean()
                        m_init = m_est[-param_guess_av_length:].mean()
                    else:
                        l_init = 0
                        m_init = 0

                    target_bin = np.argmin([abs(args.tx_freq - f) for f in freqs])
                    
                    # TODO: is this correct? should it be the bin center?
                    uvw = uvw_from_antenna_pairs(bl, wavelength=3e8/args.tx_freq)
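                    # One possible resolution of the TODO above (an assumption,
                    # not the project's verified choice): use the centre of the
                    # selected FFT bin instead of the raw carrier frequency:
                    #   uvw = uvw_from_antenna_pairs(bl, wavelength=3e8 / freqs[target_bin])
                    # For a narrowband transmitter the two frequencies differ by
                    # at most half a bin width.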

                    vis_tbin = vis[:, target_bin]

                    # do the model fitting to get parameter estimates
                    l_out, m_out, opt_result = fit_model_to_vis(uvw, vis_tbin, residual_function, 
                            l_init, m_init, export_npy=args.export_npy)

                    nfev = opt_result['nfev']

                    if residual_function_chain:
                        l_out, m_out, opt_result_chain = fit_model_to_vis(uvw, vis_tbin, residual_function_chain,
                                l_out, m_out, export_npy=args.export_npy)

                        nfev += opt_result_chain['nfev']

                    cost = opt_result['cost']

                    # see if we should skip including this in future starting parameter estimates
                    skip = False
                    if args.exclude and k in args.exclude:
                        print("Not including in parameter estimates by request")
                        skip = True

                    if not skip:
                        l_est = np.append(l_est, l_out)
                        m_est = np.append(m_est, m_out)
                        #costs = np.append(costs, cost)

                    # compute source sky location from parameter values
                    src_elev, src_az = lm_to_ea(l_out, m_out)

                    # write data to h5 file
                    h5f['l_start'][k] = l_init
                    h5f['m_start'][k] = m_init
                    h5f['l_est'][k] = l_out
                    h5f['m_est'][k] = m_out
                    h5f['elevation'][k] = src_elev
                    h5f['azimuth'][k] = src_az
                    h5f['cost'][k] = cost
                    h5f['skipped'][k] = skip
                    h5f['nfev'][k] = nfev

                    save_scatter = (args.scatter and k in args.scatter) or (args.scatter_every and k % args.scatter_every == 0)
                    if save_scatter:
                        print("Plotting model and data scatter")
                        vis_phase_scatter_3d(uvw[:,0], uvw[:,1], vis_tbin, show=False,
                                html_savename=f"scatter_{k}.html", l=l_out, m=m_out)

                    k += 1
                    print("\n\n")