def test_TrackResultsObj1():
  '''
  Test for combined channel data iterations.
  '''
  __testSetup()

  tr = TrackingResults('test_output.bin')
  assert tr.channelCount() == 2
  assert isinstance(tr.getEntries(), list)
  assert len(tr.getEntries()) == 2
  co = tr.combinedResult()
  assert isinstance(co, TrackingResults.MultiChannel)

  it = iter(co)
  for i in range(500):
    tr1, idx1 = it.next()
    tr2, idx2 = it.next()
    assert tr1.status == 'A'
    assert tr2.status == 'B'
    assert idx1 == idx2 == i
    assert tr1.ms_tracked[i] == i * 2
    assert tr2.ms_tracked[i] == i * 2 + 1

  try:
    it.next()
    assert False
  except StopIteration:
    pass
  removeTrackingOutputFiles("test_output.bin")
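
These tests drive the iterators with the Python 2 style it.next() call; under Python 3 the built-in next(it) is used instead. A minimal, version-agnostic sketch of the same loop, assuming the fixture written by __testSetup() is still on disk:

it = iter(TrackingResults('test_output.bin').combinedResult())
for i in range(500):
  # next(it) works on both Python 2 and Python 3; it.next() is Python 2 only.
  tr1, idx1 = next(it)
  tr2, idx2 = next(it)
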
def test_TrackResultsObj_dump():
  '''
  Sanity test for combined data output in a textual form.
  '''
  __testSetup()
  tr = TrackingResults('test_output.bin')
  tr.dump()

  removeTrackingOutputFiles("test_output.bin")
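
dump() is also called with an explicit file object in run.py further below (combinedResultObject.dump(f)). A minimal sketch of redirecting the textual dump to a file; the output file name here is only an illustration:

tr = TrackingResults('test_output.bin')
with open('test_output.dump.txt', 'wt') as f:  # hypothetical output name
  # Write the combined textual dump into 'f', as run.py does below.
  tr.dump(f)
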
def test_TrackResultsObj0():
  '''
  Test for an empty tracking results object.
  '''
  removeTrackingOutputFiles("test_output.bin")
  tr = TrackingResults('test_output.bin')
  assert tr.channelCount() == 0
  assert isinstance(tr.getEntries(), list)
  assert len(tr.getEntries()) == 0
  co = tr.combinedResult()
  assert isinstance(co, TrackingResults.MultiChannel)
  try:
    iter(co).next()
    assert False
  except StopIteration:
    pass
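
The end-of-iteration check (call next() once more and expect StopIteration) is repeated in several of these tests. A small helper could factor it out; this is a hypothetical addition, not part of the existing test module:

def assertExhausted(it):
  '''
  Assert that the iterator 'it' yields no further items.
  '''
  try:
    it.next()
  except StopIteration:
    return
  assert False, "iterator was expected to be exhausted"
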
def test_TrackResultsObj_single1():
  '''
  Test for individual channel data iterations.
  '''
  __testSetup()

  tr = TrackingResults('test_output.bin')
  assert tr.channelCount() == 2
  assert isinstance(tr.getEntries(), list)
  assert len(tr.getEntries()) == 2
  c0 = tr.channelResult(0)
  c1 = tr.channelResult(1)
  assert isinstance(c0, TrackingResults.SingleChannel)
  assert isinstance(c1, TrackingResults.SingleChannel)

  it1 = iter(c0)
  it2 = iter(c1)
  for i in range(500):
    tr1, idx1 = it1.next()
    tr2, idx2 = it2.next()
    assert tr1.status == 'A'
    assert tr2.status == 'B'
    assert idx1 == idx2 == i
    assert tr1.ms_tracked[i] == i * 2
    assert tr2.ms_tracked[i] == i * 2 + 1
  try:
    it1.next()
    assert False
  except StopIteration:
    pass
  try:
    it2.next()
    assert False
  except StopIteration:
    pass

  removeTrackingOutputFiles("test_output.bin")
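
Because the channel objects support the iterator protocol exercised above, they can also be consumed with a plain for loop. A minimal sketch of walking one channel's entries, assuming the fixture written by __testSetup():

tr = TrackingResults('test_output.bin')
for entry, index in tr.channelResult(0):
  # Each item is an (entry, index) pair, as in the tests above.
  print index, entry.status
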
File: run.py, project: cbeighley/peregrine
def main():
  default_logging_config()

  parser = argparse.ArgumentParser()

  parser.add_argument("-a", "--skip-acquisition",
                      help="use previously saved acquisition results",
                      action="store_true")
  parser.add_argument("-t", "--skip-tracking",
                      help="use previously saved tracking results",
                      action="store_true")
  parser.add_argument("-n", "--skip-navigation",
                      help="use previously saved navigation results",
                      action="store_true")

  populate_peregrine_cmd_line_arguments(parser)

  args = parser.parse_args()

  if args.no_run:
    return 0

  if args.file is None:
    parser.print_help()
    return

  if args.profile == 'peregrine' or args.profile == 'custom_rate':
    freq_profile = defaults.freq_profile_peregrine
  elif args.profile == 'low_rate':
    freq_profile = defaults.freq_profile_low_rate
  elif args.profile == 'normal_rate':
    freq_profile = defaults.freq_profile_normal_rate
  elif args.profile == 'high_rate':
    freq_profile = defaults.freq_profile_high_rate
  else:
    raise NotImplementedError()

  if args.l1ca_profile:
    profile = defaults.l1ca_stage_profiles[args.l1ca_profile]
    stage2_coherent_ms = profile[1]['coherent_ms']
    stage2_params = profile[1]['loop_filter_params']
  else:
    stage2_coherent_ms = None
    stage2_params = None

  if args.pipelining is not None:
    tracker_options = {'mode': 'pipelining', 'k': args.pipelining}
  else:
    tracker_options = None

  ms_to_process = int(args.ms_to_process)

  skip_samples = 0
  if args.skip_samples is not None:
    skip_samples = args.skip_samples
  if args.skip_ms is not None:
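    # Convert the skip interval from milliseconds to a sample count:
    # samples = ms * sampling_freq [Hz] / 1e3 [ms per second].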
    skip_samples = int(args.skip_ms * freq_profile['sampling_freq'] / 1e3)

  samples = {gps.L1CA: {'IF': freq_profile['GPS_L1_IF']},
             gps.L2C: {'IF': freq_profile['GPS_L2_IF']},
             'samples_total': -1,
             'sample_index': skip_samples}

  # Do acquisition
  acq_results_file = args.file + ".acq_results"
  if args.skip_acquisition:
    logging.info("Skipping acquisition, loading saved acquisition results.")
    try:
      acq_results = load_acq_results(acq_results_file)
    except IOError:
      logging.critical("Couldn't open acquisition results file '%s'.",
                       acq_results_file)
      sys.exit(1)
  else:
    for signal in [gps.L1CA]:

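      # Number of samples in one C/A code period:
      # sampling_freq / (chip_rate / code_length) == sampling_freq * code_period.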
      samplesPerCode = int(round(freq_profile['sampling_freq'] /
                                 (gps.l1ca_chip_rate / gps.l1ca_code_length)))

      # Get 11ms of acquisition samples for fine frequency estimation
      load_samples(samples=samples,
                   num_samples=11 * samplesPerCode,
                   filename=args.file,
                   file_format=args.file_format)

      acq = Acquisition(signal,
                        samples[signal]['samples'],
                        freq_profile['sampling_freq'],
                        freq_profile['GPS_L1_IF'],
                        gps.l1ca_code_period * freq_profile['sampling_freq'],
                        gps.l1ca_code_length)
      # Only one signal (L1CA) is expected to be acquired at the moment.
      # TODO: add handling of acquisition results from GLONASS once GLONASS
      # acquisition is supported.
      acq_results = acq.acquisition(progress_bar_output=args.progress_bar)

    print "Acquisition is over!"

    try:
      save_acq_results(acq_results_file, acq_results)
      logging.debug("Saving acquisition results as '%s'" % acq_results_file)
    except IOError:
      logging.error("Couldn't save acquisition results file '%s'.",
                    acq_results_file)

  # Filter out non-acquired satellites.
  acq_results = [ar for ar in acq_results if ar.status == 'A']

  if len(acq_results) == 0:
    logging.critical("No satellites acquired!")
    sys.exit(1)

  acq_results.sort(key=attrgetter('snr'), reverse=True)

  # Track the acquired satellites
  if not args.skip_tracking:
    # Remove tracking output files from the previous session.
    removeTrackingOutputFiles(args.file)

    load_samples(samples=samples,
                 filename=args.file,
                 file_format=args.file_format)

    if ms_to_process < 0:
      ms_to_process = int(
          1e3 * samples['samples_total'] / freq_profile['sampling_freq'])

    # Create the tracker object, which also creates one tracking
    # channel for each acquisition result in the 'acq_results' list.
    tracker = tracking.Tracker(samples=samples,
                               channels=acq_results,
                               ms_to_track=ms_to_process,
                               sampling_freq=freq_profile[
                                   'sampling_freq'],  # [Hz]
                               stage2_coherent_ms=stage2_coherent_ms,
                               stage2_loop_filter_params=stage2_params,
                               tracker_options=tracker_options,
                               output_file=args.file,
                               progress_bar_output=args.progress_bar,
                               check_l2c_mask=args.check_l2c_mask)
    # The tracking channels are designed to support batch processing.
    # In batch processing mode the data samples are provided in
    # batches (chunks) of 'defaults.processing_block_size' bytes.
    # The loop below runs all tracking channels on each batch as it
    # is read from the samples file.
    tracker.start()
    condition = True
    while condition:
      # Each tracking channel remembers its own sample offset within
      # 'samples', so that when a new batch of data is provided it
      # resumes exactly where it stopped in the previous batch
      # processing round.
      # 'sample_index' is set to the smallest offset within the
      # 'samples' array across all tracking channels.
      sample_index = tracker.run_channels(samples)
      if sample_index == samples['sample_index']:
        condition = False
      else:
        samples['sample_index'] = sample_index
        load_samples(samples=samples,
                     filename=args.file,
                     file_format=args.file_format)
    fn_results = tracker.stop()

    logging.debug("Saving tracking results as '%s'" % fn_results)

  # Do navigation
  if not args.skip_navigation:
    combinedResultObject = TrackingResults(args.file)

    # Dump combined output into a text file
    with open(createTrackingDumpOutputFileName(args.file), "wt") as f:
      logging.debug("Creating combined tracking file %s", f.name)
      combinedResultObject.dump(f)

    samplingFreqHz = freq_profile['sampling_freq']
    nav_solns = navigation(combinedResultObject, samplingFreqHz)
    nav_results = []
    for s, t in nav_solns:
      nav_results += [(t, s.pos_llh, s.vel_ned)]
    if len(nav_results):
      print "First nav solution: t=%s lat=%.5f lon=%.5f h=%.1f vel_ned=(%.2f, %.2f, %.2f)" % (
          nav_results[0][0],
          np.degrees(nav_results[0][1][0]), np.degrees(
              nav_results[0][1][1]), nav_results[0][1][2],
          nav_results[0][2][0], nav_results[0][2][1], nav_results[0][2][2])
      nav_results_file = args.file + ".nav_results"
      with open(nav_results_file, 'wb') as f:
        cPickle.dump(nav_results, f, protocol=cPickle.HIGHEST_PROTOCOL)
      print "and %d more are cPickled in '%s'." % (len(nav_results) - 1, nav_results_file)
    else:
      print "No navigation results."
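
The nav_results file written above is a plain cPickle dump of (t, pos_llh, vel_ned) tuples, so it can be read back with cPickle.load(). A minimal sketch; the input file name is only an illustration:

import cPickle

with open('samples.bin.nav_results', 'rb') as f:  # hypothetical file name
  nav_results = cPickle.load(f)
for t, pos_llh, vel_ned in nav_results:
  # The same tuples that main() assembled: time, LLH position, NED velocity.
  print t, pos_llh, vel_ned
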