def __init__(self, params):
    """
    Initialize the parameters, which are common across different
    types of tracking channels.

    Parameters
    ----------
    params : dictionary
      The subset of tracking channel parameters that are deemed
      to be common across different types of tracking channels.
      Must contain an 'acq' acquisition result (with `prn`, `signal`,
      `doppler`, `code_phase` and `sample_index` attributes) and a
      'samples' entry carrying the batch 'sample_index'.

    """
    # Promote every entry of the parameter dictionary to an instance
    # attribute. NOTE: `.items()` replaces the Python-2-only
    # `.iteritems()`; on Python 2 it returns a list that iterates
    # identically, and it keeps the code importable on Python 3.
    for (key, value) in params.items():
        setattr(self, key, value)

    self.prn = params['acq'].prn
    self.signal = params['acq'].signal

    # Capacity of the in-memory result buffer before it is dumped to file.
    self.results_num = 500
    self.stage1 = True

    # Phase/frequency lock detector, configured from the channel's
    # lock detection parameter set.
    self.lock_detect = LockDetector(
        k1=self.lock_detect_params["k1"],
        k2=self.lock_detect_params["k2"],
        lp=self.lock_detect_params["lp"],
        lo=self.lock_detect_params["lo"])

    self.alias_detect = AliasDetector(
        acc_len=defaults.alias_detect_interval_ms / self.coherent_ms,
        time_diff=1)

    # C/N0 estimator running at the coherent integration rate.
    self.cn0_est = CN0Estimator(
        bw=1e3 / self.coherent_ms,
        cn0_0=self.cn0_0,
        cutoff_freq=0.1,
        loop_freq=self.loop_filter_params["loop_freq"]
    )

    # Code/carrier tracking loop filter; the concrete class is provided
    # by the channel configuration (`loop_filter_class`).
    self.loop_filter = self.loop_filter_class(
        loop_freq=self.loop_filter_params['loop_freq'],
        code_freq=self.code_freq_init,
        code_bw=self.loop_filter_params['code_bw'],
        code_zeta=self.loop_filter_params['code_zeta'],
        code_k=self.loop_filter_params['code_k'],
        carr_to_code=self.loop_filter_params['carr_to_code'],
        carr_freq=self.acq.doppler,
        carr_bw=self.loop_filter_params['carr_bw'],
        carr_zeta=self.loop_filter_params['carr_zeta'],
        carr_k=self.loop_filter_params['carr_k'],
        carr_freq_b1=self.loop_filter_params['carr_freq_b1'],
    )

    # Frequencies the NCO is commanded to for the next integration round.
    self.next_code_freq = self.loop_filter.to_dict()['code_freq']
    self.next_carr_freq = self.loop_filter.to_dict()['carr_freq']

    self.track_result = TrackResults(self.results_num,
                                     self.acq.prn,
                                     self.acq.signal)
    self.alias_detect_init = 1
    self.code_phase = 0.0
    self.carr_phase = 0.0

    # Sampling rate expressed in samples per PRN code chip.
    self.samples_per_chip = int(round(self.sampling_freq /
                                      self.chipping_rate))

    # Convert the acquisition result into the absolute index of the first
    # sample to process within the original data file.
    self.sample_index = params['samples']['sample_index']
    self.sample_index += self.acq.sample_index
    self.sample_index += self.acq.code_phase * self.samples_per_chip
    self.sample_index = int(math.floor(self.sample_index))

    self.carr_phase_acc = 0.0
    self.code_phase_acc = 0.0
    self.samples_tracked = 0
    self.i = 0

    self.pipelining = False    # Flag if pipelining is used
    self.pipelining_k = 0.     # Error prediction coefficient for pipelining
    self.short_n_long = False  # Short/Long cycle simulation
    self.short_step = True     # Short cycle

    if self.tracker_options:
        mode = self.tracker_options['mode']
        if mode == 'pipelining':
            self.pipelining = True
            self.pipelining_k = self.tracker_options['k']
        elif mode == 'short-long-cycles':
            self.short_n_long = True
            self.pipelining = True
            self.pipelining_k = self.tracker_options['k']
        else:
            raise ValueError("Invalid tracker mode %s" % str(mode))
class TrackingChannel(object):
  """
  Tracking channel base class.

  Specialized signal tracking channel classes are subclassed from
  this class. See TrackingChannelL1CA or TrackingChannelL2C as
  examples.

  Sub-classes can optionally implement :meth:`_run_preprocess`,
  :meth:`_run_postprocess` and :meth:`_get_result` methods.

  The class is designed to support batch processing of sample data.
  This is to help processing of large data sample files without
  the need of loading the whole file into a memory.
  The class instance keeps track of the next sample to be processed
  in the form of an index within the original data file.
  Each sample data batch comes with its starting index within the original
  data file. Given the starting index of the batch and its own index
  of the next sample to be processed, the code computes the offset
  within the batch and starts/continues the tracking procedure from there.

  """

  def __init__(self, params):
    """
    Initialize the parameters, which are common across different
    types of tracking channels.

    Parameters
    ----------
    params : dictionary
      The subset of tracking channel parameters that are deemed
      to be common across different types of tracking channels.

    """
    # Promote every entry of the parameter dictionary to an instance
    # attribute. NOTE(review): `iteritems()` is Python-2-only.
    for (key, value) in params.iteritems():
      setattr(self, key, value)

    self.prn = params['acq'].prn
    self.signal = params['acq'].signal

    # Capacity of the in-memory result buffer before it is dumped to file.
    self.results_num = 500
    self.stage1 = True

    # Phase/frequency lock detector.
    self.lock_detect = LockDetector(
        k1=self.lock_detect_params["k1"],
        k2=self.lock_detect_params["k2"],
        lp=self.lock_detect_params["lp"],
        lo=self.lock_detect_params["lo"])

    self.alias_detect = AliasDetector(
        acc_len=defaults.alias_detect_interval_ms / self.coherent_ms,
        time_diff=1)

    # C/N0 estimator running at the coherent integration rate.
    self.cn0_est = CN0Estimator(
        bw=1e3 / self.coherent_ms,
        cn0_0=self.cn0_0,
        cutoff_freq=0.1,
        loop_freq=self.loop_filter_params["loop_freq"]
    )

    # Code/carrier tracking loop filter; the concrete class comes from
    # the channel configuration (`loop_filter_class`).
    self.loop_filter = self.loop_filter_class(
        loop_freq=self.loop_filter_params['loop_freq'],
        code_freq=self.code_freq_init,
        code_bw=self.loop_filter_params['code_bw'],
        code_zeta=self.loop_filter_params['code_zeta'],
        code_k=self.loop_filter_params['code_k'],
        carr_to_code=self.loop_filter_params['carr_to_code'],
        carr_freq=self.acq.doppler,
        carr_bw=self.loop_filter_params['carr_bw'],
        carr_zeta=self.loop_filter_params['carr_zeta'],
        carr_k=self.loop_filter_params['carr_k'],
        carr_freq_b1=self.loop_filter_params['carr_freq_b1'],
    )

    # Frequencies the NCO is commanded to for the next integration round.
    self.next_code_freq = self.loop_filter.to_dict()['code_freq']
    self.next_carr_freq = self.loop_filter.to_dict()['carr_freq']

    self.track_result = TrackResults(self.results_num,
                                     self.acq.prn,
                                     self.acq.signal)
    self.alias_detect_init = 1
    self.code_phase = 0.0
    self.carr_phase = 0.0

    # Sampling rate expressed in samples per PRN code chip.
    self.samples_per_chip = int(round(self.sampling_freq /
                                      self.chipping_rate))

    # Convert the acquisition result into the absolute index of the first
    # sample to process within the original data file.
    self.sample_index = params['samples']['sample_index']
    self.sample_index += self.acq.sample_index
    self.sample_index += self.acq.code_phase * self.samples_per_chip
    self.sample_index = int(math.floor(self.sample_index))

    self.carr_phase_acc = 0.0
    self.code_phase_acc = 0.0
    self.samples_tracked = 0
    self.i = 0

    self.pipelining = False    # Flag if pipelining is used
    self.pipelining_k = 0.     # Error prediction coefficient for pipelining
    self.short_n_long = False  # Short/Long cycle simulation
    self.short_step = True     # Short cycle

    if self.tracker_options:
      mode = self.tracker_options['mode']
      if mode == 'pipelining':
        self.pipelining = True
        self.pipelining_k = self.tracker_options['k']
      elif mode == 'short-long-cycles':
        self.short_n_long = True
        self.pipelining = True
        self.pipelining_k = self.tracker_options['k']
      else:
        raise ValueError("Invalid tracker mode %s" % str(mode))

  def dump(self):
    """
    Append intermediate tracking results to a file.

    Flushes the accumulated result buffer to `self.output_file` and
    resets the buffer index.

    """
    fn_analysis, fn_results = self.track_result.dump(self.output_file,
                                                     self.i)
    self.i = 0
    return fn_analysis, fn_results

  def start(self):
    """
    Start tracking channel.

    For the time being only prints an informative log message about
    the initial parameters of the tracking channel.

    """
    logger.info("[PRN: %d (%s)] Tracking is started. "
                "IF: %.1f, Doppler: %.1f, code phase: %.1f, "
                "sample index: %d" %
                (self.prn + 1,
                 self.signal,
                 self.IF,
                 self.acq.doppler,
                 self.acq.code_phase,
                 self.acq.sample_index))

  def get_index(self):
    """
    Return index of next sample to be processed by the tracking channel.

    The tracking channel is designed to process the input data samples
    in batches. A single batch is fed to multiple tracking channels.
    To keep track of the order of samples within one tracking channel,
    each channel maintains an index of the next sample to be processed.
    This method is a getter method for the index.

    Returns
    -------
    sample_index: integer
      The next data sample to be processed.

    """
    return self.sample_index

  def _run_preprocess(self):
    """
    Customize the tracking run procedure in a subclass.

    The method can be optionally redefined in a subclass to perform
    a subclass specific actions to happen before correlator runs
    next integration round.

    """
    pass

  def _run_postprocess(self):
    """
    Customize the tracking run procedure in a subclass.

    The method can be optionally redefined in a subclass to perform
    a subclass specific actions to happen after correlator runs
    next integration round.

    """
    pass

  def _get_result(self):
    """
    Customize the tracking run procedure outcome in a subclass.

    The method can be optionally redefined in a subclass to return
    a subclass specific data as a result of the tracking procedure.

    Returns
    -------
    out :
      None is returned by default.

    """
    return None

  def _short_n_long_preprocess(self):
    # Subclass hook invoked before each integration round; subclasses
    # are expected to return a (coherent_iter, code_chips_to_integrate)
    # pair — the base implementation returns None (see NOTE in run()).
    pass

  def _short_n_long_postprocess(self):
    # Subclass hook invoked after each integration round; a truthy
    # return value requests another integration round before the loop
    # filter is updated.
    pass

  def is_pickleable(self):
    """
    Check if object is pickleable.

    The base class instance is always pickleable.
    If a subclass is not pickleable, then it should redefine the method
    and return False.

    The need to know if an object is pickleable or not arises from the fact
    that we try to run the tracking procedure for multiple tracking channels
    on multiple CPU cores, if more than one core is available.
    This is done to speed up the overall processing time. When a tracking
    channel runs on a separate CPU core, it also runs on a separate process.
    When the tracking of the given batch of data is over, the process exits
    and the tracking channel state is returned to the parent process.
    This requires serialization (pickling) of the tracking object state,
    which might not be always trivial. This method essentially defines
    if the tracking channels can be run in a separate process. If the object
    is not pickleable, then the tracking for the channel is done on the same
    CPU, which runs the parent process. Therefore all non-pickleable
    tracking channels are processed sequentially.

    Returns
    -------
    out : bool
      True if the object is pickleable, False - if not.

    """
    return True

  def run(self, samples):
    """
    Run tracking channel for the given batch of data.

    This method is an entry point for the tracking procedure.
    Subclasses normally will not redefine the method, but instead
    redefine the customization methods `_run_preprocess`,
    `_run_postprocess` and `_get_result` to run signal specific
    tracking operations.

    Parameters
    ----------
    samples : dictionary
      Sample data. Sample data are provided in batches; the dictionary
      carries the batch starting 'sample_index' and per-signal samples.

    Return
    ------
    The return value is determined by `_get_result` customization method,
    which can be redefined in subclasses

    """
    self.samples = samples

    # The channel's absolute index must lie within (or after) this batch.
    if self.sample_index < samples['sample_index']:
      raise ValueError("Incorrect samples offset")

    # Offset of the next sample to process within this batch.
    sample_index = self.sample_index - samples['sample_index']
    samples_processed = 0
    samples_total = len(samples[self.signal]['samples'])

    # Rough number of samples per coherent integration; refined below from
    # the actual correlator block sizes.
    estimated_blksize = self.coherent_ms * self.sampling_freq / 1e3

    self.track_result.status = 'T'

    # Keep integrating while the tracking goal is not reached and at least
    # two estimated blocks of samples remain in the batch.
    while self.samples_tracked < self.samples_to_track and \
          (sample_index + 2 * estimated_blksize) < samples_total:

      self._run_preprocess()

      if self.pipelining:
        # Pipelining and prediction
        corr_code_freq = self.next_code_freq
        corr_carr_freq = self.next_carr_freq

        self.next_code_freq = self.loop_filter.to_dict()['code_freq']
        self.next_carr_freq = self.loop_filter.to_dict()['carr_freq']

        if self.short_n_long and not self.stage1 and not self.short_step:
          # In case of short/long cycles, the correction applicable for the
          # long cycle is smaller proportionally to the actual cycle size
          pipelining_k = self.pipelining_k / (self.coherent_ms - 1)
        else:
          pipelining_k = self.pipelining_k

        # There is an error between target frequency and actual one. Affect
        # the target frequency according to the computed error
        carr_freq_error = self.next_carr_freq - corr_carr_freq
        self.next_carr_freq += carr_freq_error * pipelining_k

        code_freq_error = self.next_code_freq - corr_code_freq
        self.next_code_freq += code_freq_error * pipelining_k

      else:
        # Immediate correction simulation
        self.next_code_freq = self.loop_filter.to_dict()['code_freq']
        self.next_carr_freq = self.loop_filter.to_dict()['carr_freq']

        corr_code_freq = self.next_code_freq
        corr_carr_freq = self.next_carr_freq

      # NOTE(review): the base-class hook returns None; subclasses must
      # override `_short_n_long_preprocess` to return a pair, otherwise
      # this unpacking raises TypeError — confirm against subclasses.
      coherent_iter, code_chips_to_integrate = self._short_n_long_preprocess()

      # NOTE(review): the loop bound is `self.coherent_iter`, not the
      # local `coherent_iter` unpacked above — presumably set by the
      # preprocess hook; verify in subclasses.
      for _ in range(self.coherent_iter):

        if (sample_index + 2 * estimated_blksize) >= samples_total:
          break

        samples_ = samples[self.signal]['samples'][sample_index:]

        # Run one correlation interval; the correlator also advances the
        # code and carrier phases.
        E_, P_, L_, blksize, self.code_phase, self.carr_phase = self.correlator(
            samples_,
            code_chips_to_integrate,
            corr_code_freq + self.chipping_rate, self.code_phase,
            corr_carr_freq + self.IF, self.carr_phase,
            self.prn_code,
            self.sampling_freq,
            self.signal
        )

        if blksize > estimated_blksize:
          estimated_blksize = blksize

        sample_index += blksize
        samples_processed += blksize
        self.carr_phase_acc += corr_carr_freq * blksize / self.sampling_freq
        self.code_phase_acc += corr_code_freq * blksize / self.sampling_freq

        # Accumulate early/prompt/late correlator outputs.
        # NOTE(review): assumes self.E/P/L are (re)initialized by a
        # subclass hook before accumulation — confirm.
        self.E += E_
        self.P += P_
        self.L += L_

      more_integration_needed = self._short_n_long_postprocess()
      if more_integration_needed:
        continue

      # Update PLL lock detector
      lock_detect_outo, \
      lock_detect_outp, \
      lock_detect_pcount1, \
      lock_detect_pcount2, \
      lock_detect_lpfi, \
      lock_detect_lpfq = self.lock_detect.update(self.P.real,
                                                 self.P.imag,
                                                 coherent_iter)

      if lock_detect_outo:
        if self.alias_detect_init:
          self.alias_detect_init = 0
          self.alias_detect.reinit(defaults.alias_detect_interval_ms /
                                   self.coherent_iter,
                                   time_diff=1)
          self.alias_detect.first(self.P.real, self.P.imag)
        alias_detect_err_hz = \
            self.alias_detect.second(self.P.real, self.P.imag) * np.pi * \
            (1e3 / defaults.alias_detect_interval_ms)
        self.alias_detect.first(self.P.real, self.P.imag)
      else:
        self.alias_detect_init = 1
        alias_detect_err_hz = 0

      self.loop_filter.update(self.E, self.P, self.L)

      # Record the state of this integration round into the result buffer.
      self.track_result.coherent_ms[self.i] = self.coherent_ms

      self.track_result.IF = self.IF
      self.track_result.carr_phase[self.i] = self.carr_phase
      self.track_result.carr_phase_acc[self.i] = self.carr_phase_acc
      self.track_result.carr_freq[self.i] = \
          self.loop_filter.to_dict()['carr_freq'] + self.IF

      self.track_result.code_phase[self.i] = self.code_phase
      self.track_result.code_phase_acc[self.i] = self.code_phase_acc
      self.track_result.code_freq[self.i] = \
          self.loop_filter.to_dict()['code_freq'] + self.chipping_rate

      # Record stuff for postprocessing
      self.track_result.absolute_sample[self.i] = self.sample_index + \
          samples_processed

      self.track_result.E[self.i] = self.E
      self.track_result.P[self.i] = self.P
      self.track_result.L[self.i] = self.L

      self.track_result.cn0[self.i] = self.cn0_est.update(
          self.P.real, self.P.imag)

      self.track_result.lock_detect_outo[self.i] = lock_detect_outo
      self.track_result.lock_detect_outp[self.i] = lock_detect_outp
      self.track_result.lock_detect_pcount1[self.i] = lock_detect_pcount1
      self.track_result.lock_detect_pcount2[self.i] = lock_detect_pcount2
      self.track_result.lock_detect_lpfi[self.i] = lock_detect_lpfi
      self.track_result.lock_detect_lpfq[self.i] = lock_detect_lpfq

      self.track_result.alias_detect_err_hz[self.i] = alias_detect_err_hz

      self._run_postprocess()

      self.samples_tracked = self.sample_index + samples_processed
      self.track_result.ms_tracked[self.i] = self.samples_tracked * 1e3 / \
          self.sampling_freq

      self.i += 1
      if self.i >= self.results_num:
        self.dump()

    if self.i > 0:
      self.dump()

    self.sample_index += samples_processed

    return self._get_result()