def track(self):
        self.tracks_fused = Track()
        self.tracks_radar = Track()
        for measurement_idx in range(0, len(self.measurements_radar)):
            # radar measurement every timestep, AIS measurement every second
            # first predict, then update with radar measurement. Then every second iteration, perform an extra update step
            # using the AIS measurement
            measurement_radar = self.measurements_radar[measurement_idx]

            prediction = self.predictor.predict(
                self.prior, timestamp=measurement_radar.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement_radar)
            post = self.updater_radar.update(hypothesis)

            # save radar track
            self.tracks_radar.append(post)

            if measurement_idx % 2:
                measurement_ais = self.measurements_ais[measurement_idx // 2]
                hypothesis = SingleHypothesis(post, measurement_ais)
                post = self.updater_ais.update(hypothesis)

            # save fused track
            self.tracks_fused.append(post)
            self.prior = self.tracks_fused[-1]
        return self.tracks_fused, self.tracks_radar
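# A minimal, self-contained sketch of the sequential-update fusion pattern used above (predict to the
# radar time, update with the radar measurement, then run a second update with the AIS measurement
# when one is available). The model, noise values and measurements below are made up for illustration
# and are not taken from the example above.
import numpy as np

dt = 1.0
F = np.array([[1.0, dt], [0.0, 1.0]])          # constant-velocity transition matrix
Q = 0.01 * np.eye(2)                           # process noise
H = np.array([[1.0, 0.0]])                     # both sensors measure position only
R_radar, R_ais = np.array([[1.0]]), np.array([[0.25]])


def kf_update(x, P, z, R):
    # Standard Kalman update: innovation covariance, gain, posterior mean and covariance.
    S = H @ P @ H.T + R
    K = P @ H.T @ np.linalg.inv(S)
    return x + K @ (z - H @ x), P - K @ S @ K.T


x, P = np.array([[0.0], [1.0]]), np.eye(2)
z_radar = [np.array([[1.1]]), np.array([[2.0]]), np.array([[3.2]]), np.array([[3.9]])]
z_ais = [np.array([[2.05]]), np.array([[4.0]])]    # one AIS fix for every second radar scan

for k, z in enumerate(z_radar):
    x, P = F @ x, F @ P @ F.T + Q                  # predict
    x, P = kf_update(x, P, z, R_radar)             # update with the radar measurement
    if k % 2:                                      # an AIS measurement is also available
        x, P = kf_update(x, P, z_ais[k // 2], R_ais)   # extra sequential update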
Example #2
    def track(self, measurements_radar, measurements_ais, fusion_rate=1):
        """
        returns fused tracks. Assumes that the rate of the radar and ais measurements are the same, and that they are
        synchornized.
        """
        tracks_radar = Track()
        for measurement in measurements_radar:
            prediction = self.predictor_radar.predict(
                self.prior_radar, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            post = self.updater_radar.update(hypothesis)
            tracks_radar.append(post)
            self.prior_radar = tracks_radar[-1]

        tracks_ais = Track()
        for measurement in measurements_ais:
            prediction = self.predictor_radar.predict(
                self.prior_ais, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            post = self.updater_ais.update(hypothesis)
            tracks_ais.append(post)
            self.prior_ais = tracks_ais[-1]

        tracks_fused = self._fuse_tracks(tracks_radar,
                                         tracks_ais,
                                         fusion_rate=fusion_rate)
        return tracks_fused, tracks_ais, tracks_radar
    def initiate(self, detections, timestamp, **kwargs):
        MAX_DEV = 500.
        tracks = set()
        measurement_model = self.measurement_model
        for detection in detections:
            state_vector = measurement_model.inverse_function(detection)
            model_covar = measurement_model.covar()

            el_az_range = np.sqrt(np.diag(model_covar))  # elev, az, range

            std_pos = detection.state_vector[2, 0] * el_az_range[1]
            stdx = np.abs(std_pos * np.sin(el_az_range[1]))
            stdy = np.abs(std_pos * np.cos(el_az_range[1]))
            stdz = np.abs(detection.state_vector[2, 0] * el_az_range[0])
            if stdx > MAX_DEV:
                print('Warning - X Deviation exceeds limit!!')
            if stdy > MAX_DEV:
                print('Warning - Y Deviation exceeds limit!!')
            if stdz > MAX_DEV:
                print('Warning - Z Deviation exceeds limit!!')
            C0 = np.diag(np.array([stdx, 50.0, stdy, 50.0, stdz, 10.0])**2)

            tracks.add(
                Track([
                    GaussianStateUpdate(state_vector,
                                        C0,
                                        SingleHypothesis(None, detection),
                                        timestamp=detection.timestamp)
                ]))
        return tracks
Example #4
    def track(self):
        self.tracks_radar = Track()
        for measurement in self.measurements_radar:
            prediction = self.predictor_radar.predict(
                self.prior_radar, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            post = self.updater_radar.update(hypothesis)
            self.tracks_radar.append(post)
            self.prior_radar = self.tracks_radar[-1]

        self.tracks_ais = Track()
        for measurement in self.measurements_ais:
            prediction = self.predictor_radar.predict(
                self.prior_ais, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            post = self.updater_ais.update(hypothesis)
            self.tracks_ais.append(post)
            self.prior_ais = self.tracks_ais[-1]

        self.tracks_fused = self._fuse_tracks(self.tracks_radar,
                                              self.tracks_ais)

        return self.tracks_fused, self.tracks_radar, self.tracks_ais
Example #5
    def update(self, hypothesis, measurementmodel, **kwargs):
        measurement_matrix = measurementmodel.matrix()  # H
        measurement_noise_covar = measurementmodel.covar()  # R
        prediction_covar = hypothesis.prediction.covar  # P
        messprediction = self.get_measurement_prediction(
            hypothesis.prediction.mean, measurementmodel)

        S = measurement_matrix @ prediction_covar @ measurement_matrix.T + measurement_noise_covar  # S
        W = prediction_covar @ measurement_matrix.T @ np.linalg.pinv(S)  # W
        Innovation = hypothesis.measurement.state_vector - (
            measurement_matrix @ hypothesis.prediction.mean)  # v

        x_post = hypothesis.prediction.mean + W @ Innovation  # x + W @ v
        P_post = prediction_covar - (W @ S @ W.T)  # P - ( W @ S @ W.T )

        hypothesis = SingleHypothesis(
            hypothesis.prediction, hypothesis.measurement,
            GaussianMeasurementPrediction(messprediction, S,
                                          hypothesis.prediction.timestamp))

        return GaussianStateUpdate(x_post, P_post, hypothesis,
                                   hypothesis.measurement.timestamp)
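# A quick standalone check of the update equations used above (innovation covariance S, gain W,
# innovation v, posterior mean and covariance), on a hypothetical two-state prediction with a scalar
# position measurement. The numbers are illustrative only.
import numpy as np

x_pred = np.array([[0.5], [1.0]])
P_pred = np.array([[1.0, 0.1], [0.1, 0.5]])
H = np.array([[1.0, 0.0]])
R = np.array([[0.04]])
z = np.array([[0.62]])

S = H @ P_pred @ H.T + R                   # innovation covariance
W = P_pred @ H.T @ np.linalg.pinv(S)       # gain (pinv, as in the updater above)
v = z - H @ x_pred                         # innovation
x_post = x_pred + W @ v
P_post = P_pred - W @ S @ W.T

# The covariance update is equivalent to the more familiar P - W H P form.
assert np.allclose(P_post, P_pred - W @ H @ P_pred)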
def test_information(UpdaterClass, measurement_model, prediction, measurement):
    """Tests the information form of the Kalman filter update step."""

    # This is how the Kalman filter does it
    kupdater = KalmanUpdater(measurement_model)
    kposterior = kupdater.update(SingleHypothesis(prediction, measurement))

    # Create the information state representation
    prediction_precision = np.linalg.inv(prediction.covar)
    info_prediction_mean = prediction_precision @ prediction.state_vector

    info_prediction = InformationStatePrediction(info_prediction_mean,
                                                 prediction_precision)

    # Initialise an information form of the Kalman updater
    updater = UpdaterClass(measurement_model=measurement_model)

    # Perform and assert state update (without measurement prediction)
    posterior = updater.update(
        SingleHypothesis(prediction=info_prediction, measurement=measurement))

    # Check that the information matrix is positive semi-definite (i.e. that all the eigenvalues
    # are non-negative)
    assert (np.all(np.linalg.eigvals(posterior.precision) >= 0))

    # Does the measurement prediction work?
    assert (np.allclose(
        kupdater.predict_measurement(prediction).state_vector,
        updater.predict_measurement(info_prediction).state_vector,
        0,
        atol=1.e-14))

    # Does the updated state agree with the Kalman posterior?
    assert (np.allclose(
        kposterior.state_vector,
        np.linalg.inv(posterior.precision) @ posterior.state_vector,
        0,
        atol=1.e-14))
    assert (np.allclose(kposterior.covar,
                        np.linalg.inv(posterior.precision),
                        0,
                        atol=1.e-14))
    assert (np.array_equal(posterior.hypothesis.prediction, info_prediction))

    assert (np.array_equal(posterior.hypothesis.measurement, measurement))
    assert (posterior.timestamp == prediction.timestamp)

    # test that we can get to the inverse matrix
    class LinearGaussianwithInverse(LinearGaussian):
        def inverse_covar(self, **kwargs):
            return np.linalg.inv(self.covar(**kwargs))

    meas_model_winv = LinearGaussianwithInverse(ndim_state=2,
                                                mapping=[0],
                                                noise_covar=np.array([[0.04]]))
    updater_winv = UpdaterClass(meas_model_winv)

    # Test this still works
    post_from_inv = updater_winv.update(
        SingleHypothesis(prediction=info_prediction, measurement=measurement))
    # and check
    assert (np.allclose(posterior.state_vector,
                        post_from_inv.state_vector,
                        0,
                        atol=1.e-14))

    # Can one force symmetric covariance?
    updater.force_symmetric_covariance = True
    posterior = updater.update(
        SingleHypothesis(prediction=info_prediction, measurement=measurement))

    assert (np.allclose(posterior.precision - posterior.precision.T,
                        np.zeros(np.shape(posterior.precision)),
                        0,
                        atol=1.e-14))
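# The test above relies on the duality between the moment and information forms: the precision matrix
# is the inverse covariance and the information state vector is the precision times the mean. A
# minimal round-trip sketch with made-up numbers:
import numpy as np

x = np.array([[1.0], [2.0]])                      # mean
P = np.array([[0.5, 0.1], [0.1, 0.3]])            # covariance

Y = np.linalg.inv(P)                              # precision (information) matrix
y = Y @ x                                         # information state vector

# Recover the moment form from the information form.
assert np.allclose(np.linalg.inv(Y), P)
assert np.allclose(np.linalg.inv(Y) @ y, x)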
Example #7
# associated explicitly. This is done by way of a :class:`~.Hypothesis`, the simplest of which
# is a :class:`~.SingleHypothesis`, which associates a single predicted state with a single
# detection. There is much more detail on how the :class:`~.Hypothesis` class is used in later
# tutorials.
from stonesoup.types.hypothesis import SingleHypothesis

# %%
# With this, we'll now loop through our measurements, predicting and updating at each timestep.
# Uncontroversially, a Predictor has a :meth:`predict` method and an Updater an :meth:`update`
# method to do this. Storing the information is facilitated by the top-level :class:`~.Track`
# class, which holds a sequence of states.
from stonesoup.types.track import Track
track = Track()
for measurement in measurements:
    prediction = predictor.predict(prior, timestamp=measurement.timestamp)
    hypothesis = SingleHypothesis(
        prediction, measurement)  # Group a prediction and measurement
    post = updater.update(hypothesis)
    track.append(post)
    prior = track[-1]

# %%
# Plot the resulting track, including uncertainty ellipses
ax.plot([state.state_vector[0] for state in track],
        [state.state_vector[2] for state in track],
        marker=".")

from matplotlib.patches import Ellipse
for state in track:
    w, v = np.linalg.eig(measurement_model.matrix() @ state.covar
                         @ measurement_model.matrix().T)
    max_ind = np.argmax(w)
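# The tutorial fragment above stops after the eigen-decomposition. A common way to finish the
# uncertainty ellipses is sketched below, assuming the `ax`, `track` and `measurement_model` objects
# (and the `Ellipse` import) from the fragment: the orientation comes from the dominant eigenvector
# and the half-axes from the square roots of the eigenvalues.
for state in track:
    w, v = np.linalg.eig(measurement_model.matrix() @ state.covar
                         @ measurement_model.matrix().T)
    max_ind = np.argmax(w)
    min_ind = np.argmin(w)
    orient = np.arctan2(v[1, max_ind], v[0, max_ind])     # angle of the dominant axis
    ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
                      width=2 * np.sqrt(w[max_ind]),
                      height=2 * np.sqrt(w[min_ind]),
                      angle=np.rad2deg(orient),
                      alpha=0.2)
    ax.add_artist(ellipse)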
    def track(self):
        """
        todo
        :return:
        """
        # create list for storing kalman gains
        kf_gains_radar = []
        kf_gains_ais = []

        # create list for storing transition_noise_covar
        transition_covars_radar = []
        transition_covars_ais = []

        # create list for storing transition matrices
        transition_matrixes_radar = []
        transition_matrixes_ais = []

        # create list for storing tracks
        tracks_radar = Track()
        tracks_ais = Track()

        # track
        for measurement in self.measurements_radar:
            prediction = self.predictor_radar.predict(
                self.prior_radar, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            # calculate the kalman gain
            hypothesis.measurement_prediction = self.updater_radar.predict_measurement(
                hypothesis.prediction,
                measurement_model=self.measurement_model_radar)
            post_cov, kalman_gain = self.updater_radar._posterior_covariance(
                hypothesis)
            kf_gains_radar.append(kalman_gain)
            # get the transition model covar (NOTE: the same for AIS and radar; the naming difference is not a bug)
            predict_over_interval = measurement.timestamp - self.prior_radar.timestamp
            transition_covars_radar.append(
                self.transition_model_radar.covar(
                    time_interval=predict_over_interval))
            transition_matrixes_radar.append(
                self.transition_model_radar.matrix(
                    time_interval=predict_over_interval))
            # update
            post = self.updater_radar.update(hypothesis)
            tracks_radar.append(post)
            self.prior_radar = post

        for measurement in self.measurements_ais:
            prediction = self.predictor_ais.predict(
                self.prior_ais, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            # calculate the kalman gain
            hypothesis.measurement_prediction = self.updater_ais.predict_measurement(
                hypothesis.prediction,
                measurement_model=self.measurement_model_ais)
            post_cov, kalman_gain = self.updater_ais._posterior_covariance(
                hypothesis)
            kf_gains_ais.append(kalman_gain)
            # get the transition model covar
            predict_over_interval = measurement.timestamp - self.prior_ais.timestamp
            transition_covars_ais.append(
                self.transition_model_ais.covar(
                    time_interval=predict_over_interval))
            transition_matrixes_ais.append(
                self.transition_model_ais.matrix(
                    time_interval=predict_over_interval))
            # update
            post = self.updater_ais.update(hypothesis)
            tracks_ais.append(post)
            self.prior_ais = post

        # FOR NOW: run track_to_track_association here; todo: change the pipeline flow
        # FOR NOW: run the association only when both tracks have a new posterior (i.e. each time the AIS has a posterior)
        # todo: handle fusion when one track predicts and the other updates (or both predict); this can't be done
        #  with the theory described in the article

        cross_cov_ij = [np.zeros([4, 4])]
        cross_cov_ji = [np.zeros([4, 4])]

        # TODO change the flow to assume that the indexes decide whether it's from the same iteration
        # use indexes to loop through tracks, kf_gains etc

        tracks_fused = []
        # tracks_fused.append(tracks_radar[0])
        for i in range(1, len(tracks_radar)):
            # we assume that the indexes correspond to the timestamps, i.e. that the lists are 'synchronized'
            # check to make sure
            if tracks_ais[i].timestamp == tracks_radar[i].timestamp:
                # calculate the cross-covariance estimation error
                cross_cov_ij.append(
                    calc_cross_cov_estimate_error(
                        self.measurement_model_radar.matrix(),
                        self.measurement_model_ais.matrix(), kf_gains_radar[i],
                        kf_gains_ais[i], transition_matrixes_radar[i],
                        transition_covars_ais[i], cross_cov_ij[i - 1]))
                cross_cov_ji.append(
                    calc_cross_cov_estimate_error(
                        self.measurement_model_ais.matrix(),
                        self.measurement_model_radar.matrix(), kf_gains_ais[i],
                        kf_gains_radar[i], transition_matrixes_ais[i],
                        transition_covars_radar[i], cross_cov_ji[i - 1]))

                # test for track association
                # same_target = track_to_track_association.test_association_dependent_tracks(tracks_radar[i],
                #                                                                            tracks_ais[i],
                #                                                                            cross_cov_ij[i],
                #                                                                            cross_cov_ji[i], 0.01)
                same_target = True  # ignore test for track association for now
                if same_target:
                    fused_posterior, fused_covar = track_to_track_fusion.fuse_dependent_tracks(
                        tracks_radar[i], tracks_ais[i], cross_cov_ij[i],
                        cross_cov_ji[i])
                    estimate = GaussianState(fused_posterior,
                                             fused_covar,
                                             timestamp=tracks_ais[i].timestamp)
                    tracks_fused.append(estimate)
        return tracks_fused, tracks_ais, tracks_radar
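# `fuse_dependent_tracks` above is a project-specific helper. For reference, a common way to fuse two
# correlated estimates with known cross-covariances is the Bar-Shalom/Campo formula; the NumPy sketch
# below (with made-up values) only illustrates the kind of computation such a helper might perform and
# is not the implementation used above.
import numpy as np


def fuse_dependent(x_i, P_i, x_j, P_j, P_ij, P_ji):
    """Bar-Shalom/Campo fusion of two correlated estimates (illustrative)."""
    D = P_i + P_j - P_ij - P_ji            # accounts for the cross-correlation terms
    gain = (P_i - P_ij) @ np.linalg.inv(D)
    x_fused = x_i + gain @ (x_j - x_i)
    P_fused = P_i - gain @ (P_i - P_ji)
    return x_fused, P_fused


# Hypothetical two-state estimates with a small cross-covariance.
x_i, P_i = np.array([[1.0], [0.9]]), np.diag([0.5, 0.4])
x_j, P_j = np.array([[1.2], [1.0]]), np.diag([0.3, 0.6])
P_ij = 0.05 * np.eye(2)
x_fused, P_fused = fuse_dependent(x_i, P_i, x_j, P_j, P_ij, P_ij.T)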
def test_kalman_smoother(SmootherClass):

    # First create a track from some detections and then smooth - check the output.

    # Setup list of Detections
    start = datetime.now()
    times = [start + timedelta(seconds=i) for i in range(0, 5)]

    measurements = [
        np.array([[2.486559674128609]]),
        np.array([[2.424165626519697]]),
        np.array([[6.603176662762473]]),
        np.array([[9.329099124074590]]),
        np.array([[14.637975326666801]]),
    ]

    detections = [
        Detection(m, timestamp=timest)
        for m, timest in zip(measurements, times)
    ]

    # Setup models.
    trans_model = ConstantVelocity(noise_diff_coeff=1)
    meas_model = LinearGaussian(ndim_state=2,
                                mapping=[0],
                                noise_covar=np.array([[0.4]]))

    # Tracking components
    predictor = KalmanPredictor(transition_model=trans_model)
    updater = KalmanUpdater(measurement_model=meas_model)

    # Prior
    cstate = GaussianState(np.ones([2, 1]), np.eye(2), timestamp=start)
    track = Track()

    for detection in detections:
        # Predict
        pred = predictor.predict(cstate, timestamp=detection.timestamp)
        # form hypothesis
        hypothesis = SingleHypothesis(pred, detection)
        # Update
        cstate = updater.update(hypothesis)
        # write to track
        track.append(cstate)

    smoother = SmootherClass(transition_model=trans_model)
    smoothed_track = smoother.smooth(track)
    smoothed_state_vectors = [state.state_vector for state in smoothed_track]

    # Verify Values
    target_smoothed_vectors = [
        np.array([[1.688813974839928], [1.267196351952188]]),
        np.array([[3.307200214998506], [2.187167840595264]]),
        np.array([[6.130402001958210], [3.308896367021604]]),
        np.array([[9.821303658438408], [4.119557021638030]]),
        np.array([[14.257730973981149], [4.594862462495096]])
    ]

    assert np.allclose(smoothed_state_vectors, target_smoothed_vectors)

    # Check that a prediction is smoothable and that no error is thrown
    # Also remove the transition model and use the one provided by the smoother
    track[1] = GaussianStatePrediction(pred.state_vector,
                                       pred.covar,
                                       timestamp=pred.timestamp)
    smoothed_track2 = smoother.smooth(track)
    assert isinstance(smoothed_track2[1], GaussianStatePrediction)

    # Check an appropriate error is thrown if not a GaussianStatePrediction/Update
    track[-1] = detections[-1]
    with pytest.raises(TypeError):
        smoother._prediction(track[-1])
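# For the Kalman case the smoother under test is a Rauch-Tung-Striebel backward pass. The compact
# NumPy sketch below shows the shape of that recursion on hypothetical filtered and one-step-predicted
# moments; it is not the Stone Soup implementation.
import numpy as np


def rts_smooth(means, covs, pred_means, pred_covs, F):
    # pred_means[k] / pred_covs[k] are the predictions from step k to k + 1.
    xs, Ps = list(means), list(covs)
    for k in range(len(means) - 2, -1, -1):
        G = covs[k] @ F.T @ np.linalg.inv(pred_covs[k])      # smoother gain
        xs[k] = means[k] + G @ (xs[k + 1] - pred_means[k])
        Ps[k] = covs[k] + G @ (Ps[k + 1] - pred_covs[k]) @ G.T
    return xs, Ps


F = np.array([[1.0, 1.0], [0.0, 1.0]])
means = [np.array([[0.0], [1.0]]), np.array([[1.1], [1.0]])]
covs = [np.eye(2), 0.5 * np.eye(2)]
pred_means = [F @ means[0]]
pred_covs = [F @ covs[0] @ F.T + 0.01 * np.eye(2)]
smoothed_means, smoothed_covs = rts_smooth(means, covs, pred_means, pred_covs, F)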
Example #10
def test_sqrt_kalman():
    measurement_model = LinearGaussian(ndim_state=2,
                                       mapping=[0],
                                       noise_covar=np.array([[0.04]]))
    prediction = GaussianStatePrediction(
        np.array([[-6.45], [0.7]]),
        np.array([[4.1123, 0.0013], [0.0013, 0.0365]]))
    sqrt_prediction = SqrtGaussianState(prediction.state_vector,
                                        np.linalg.cholesky(prediction.covar))
    measurement = Detection(np.array([[-6.23]]))

    # Calculate evaluation variables
    eval_measurement_prediction = GaussianMeasurementPrediction(
        measurement_model.matrix() @ prediction.mean,
        measurement_model.matrix() @ prediction.covar
        @ measurement_model.matrix().T + measurement_model.covar(),
        cross_covar=prediction.covar @ measurement_model.matrix().T)
    kalman_gain = eval_measurement_prediction.cross_covar @ np.linalg.inv(
        eval_measurement_prediction.covar)
    eval_posterior = GaussianState(
        prediction.mean + kalman_gain
        @ (measurement.state_vector - eval_measurement_prediction.mean),
        prediction.covar -
        kalman_gain @ eval_measurement_prediction.covar @ kalman_gain.T)

    # Test Square root form returns the same as standard form
    updater = KalmanUpdater(measurement_model=measurement_model)
    sqrt_updater = SqrtKalmanUpdater(measurement_model=measurement_model,
                                     qr_method=False)
    qr_updater = SqrtKalmanUpdater(measurement_model=measurement_model,
                                   qr_method=True)

    posterior = updater.update(
        SingleHypothesis(prediction=prediction, measurement=measurement))
    posterior_s = sqrt_updater.update(
        SingleHypothesis(prediction=sqrt_prediction, measurement=measurement))
    posterior_q = qr_updater.update(
        SingleHypothesis(prediction=sqrt_prediction, measurement=measurement))

    assert np.allclose(posterior_s.mean, eval_posterior.mean, 0, atol=1.e-14)
    assert np.allclose(posterior_q.mean, eval_posterior.mean, 0, atol=1.e-14)
    assert np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14)
    assert np.allclose(eval_posterior.covar,
                       posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T,
                       0,
                       atol=1.e-14)
    assert np.allclose(posterior.covar,
                       posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T,
                       0,
                       atol=1.e-14)
    assert np.allclose(posterior.covar,
                       posterior_q.sqrt_covar @ posterior_q.sqrt_covar.T,
                       0,
                       atol=1.e-14)
    # I'm not sure this is going to be true in all cases. Keep in order to find edge cases
    assert np.allclose(posterior_s.covar, posterior_q.covar, 0, atol=1.e-14)

    # Next create a prediction with a covariance that will cause problems
    prediction = GaussianStatePrediction(
        np.array([[-6.45], [0.7]]), np.array([[1e24, 1e-24], [1e-24, 1e24]]))
    sqrt_prediction = SqrtGaussianState(prediction.state_vector,
                                        np.linalg.cholesky(prediction.covar))

    posterior = updater.update(
        SingleHypothesis(prediction=prediction, measurement=measurement))
    posterior_s = sqrt_updater.update(
        SingleHypothesis(prediction=sqrt_prediction, measurement=measurement))
    posterior_q = qr_updater.update(
        SingleHypothesis(prediction=sqrt_prediction, measurement=measurement))

    # The new posterior will be
    eval_posterior = GaussianState(
        prediction.mean + kalman_gain
        @ (measurement.state_vector - eval_measurement_prediction.mean),
        np.array([[0.04, 0], [
            0, 1e24
        ]]))  # Accessed by looking through the Decimal() quantities...
    # It's actually [[0.039999999999, 1e-48], [1e-24, 1e24 + 1e-48]], roughly

    # Test that the square root form succeeds where the standard form fails
    assert not np.allclose(posterior.covar, eval_posterior.covar, rtol=5.e-3)
    assert np.allclose(posterior_s.sqrt_covar @ posterior_s.sqrt_covar.T,
                       eval_posterior.covar,
                       rtol=5.e-3)
    assert np.allclose(posterior_q.sqrt_covar @ posterior_q.sqrt_covar.T,
                       eval_posterior.covar,
                       rtol=5.e-3)
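# The failure mode being exercised is numerical rather than conceptual: the covariance update has to
# cancel terms of order 1e24 down to order 0.04, which double precision cannot represent, whereas the
# square-root form propagates a Cholesky factor and so stays consistent by construction. A tiny
# illustration of the precision involved (sketch only):
import numpy as np

P = np.array([[1e24, 1e-24], [1e-24, 1e24]])
L = np.linalg.cholesky(P)
assert np.allclose(L @ L.T, P)     # the factorised form reproduces P to relative precision

# Near 1e24 the spacing between representable doubles is ~1e8, so a posterior entry of 0.04
# cannot survive a subtraction of two ~1e24 quantities.
print(np.spacing(1e24))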
Example #11
def test_kalman(UpdaterClass, measurement_model, prediction, measurement):

    # Calculate evaluation variables
    eval_measurement_prediction = GaussianMeasurementPrediction(
        measurement_model.matrix() @ prediction.mean,
        measurement_model.matrix() @ prediction.covar
        @ measurement_model.matrix().T + measurement_model.covar(),
        cross_covar=prediction.covar @ measurement_model.matrix().T)
    kalman_gain = eval_measurement_prediction.cross_covar @ np.linalg.inv(
        eval_measurement_prediction.covar)
    eval_posterior = GaussianState(
        prediction.mean + kalman_gain
        @ (measurement.state_vector - eval_measurement_prediction.mean),
        prediction.covar -
        kalman_gain @ eval_measurement_prediction.covar @ kalman_gain.T)

    # Initialise a kalman updater
    updater = UpdaterClass(measurement_model=measurement_model)

    # Get and assert measurement prediction
    measurement_prediction = updater.predict_measurement(prediction)
    assert (np.allclose(measurement_prediction.mean,
                        eval_measurement_prediction.mean,
                        0,
                        atol=1.e-14))
    assert (np.allclose(measurement_prediction.covar,
                        eval_measurement_prediction.covar,
                        0,
                        atol=1.e-14))
    assert (np.allclose(measurement_prediction.cross_covar,
                        eval_measurement_prediction.cross_covar,
                        0,
                        atol=1.e-14))

    # Perform and assert state update (without measurement prediction)
    posterior = updater.update(
        SingleHypothesis(prediction=prediction, measurement=measurement))
    assert (np.allclose(posterior.mean, eval_posterior.mean, 0, atol=1.e-14))
    assert (np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14))
    assert (np.array_equal(posterior.hypothesis.prediction, prediction))
    assert (np.allclose(
        posterior.hypothesis.measurement_prediction.state_vector,
        measurement_prediction.state_vector,
        0,
        atol=1.e-14))
    assert (np.allclose(posterior.hypothesis.measurement_prediction.covar,
                        measurement_prediction.covar,
                        0,
                        atol=1.e-14))
    assert (np.array_equal(posterior.hypothesis.measurement, measurement))
    assert (posterior.timestamp == prediction.timestamp)

    # Perform and assert state update
    posterior = updater.update(
        SingleHypothesis(prediction=prediction,
                         measurement=measurement,
                         measurement_prediction=measurement_prediction))
    assert (np.allclose(posterior.mean, eval_posterior.mean, 0, atol=1.e-14))
    assert (np.allclose(posterior.covar, eval_posterior.covar, 0, atol=1.e-14))
    assert (np.array_equal(posterior.hypothesis.prediction, prediction))
    assert (np.allclose(
        posterior.hypothesis.measurement_prediction.state_vector,
        measurement_prediction.state_vector,
        0,
        atol=1.e-14))
    assert (np.allclose(posterior.hypothesis.measurement_prediction.covar,
                        measurement_prediction.covar,
                        0,
                        atol=1.e-14))
    assert (np.array_equal(posterior.hypothesis.measurement, measurement))
    assert (posterior.timestamp == prediction.timestamp)
Example #12
    def track_async(self, start_time, measurements_radar, measurements_ais, fusion_rate=1):
        """
        Assumptions:
        1) assumes that there are a maximum of one new measurement per sensor per fusion_rate.
        2) assumes that the measurements arrives exactly at the timestep that the fusion is performed.
        3) assumes kf gain of size (4,2)
        """
        # create list for storing tracks
        tracks_radar = Track()
        tracks_ais = Track()
        tracks_fused = []

        time = start_time

        cross_cov_ij = np.zeros([4, 4])
        cross_cov_ji = np.zeros([4, 4])

        measurements_radar = measurements_radar.copy()
        measurements_ais = measurements_ais.copy()
        # loop until there are no more measurements
        while measurements_radar or measurements_ais:
            # get all new measurements
            new_measurements_radar = \
                [measurement for measurement in measurements_radar if measurement.timestamp <= time]
            new_measurements_ais = \
                [measurement for measurement in measurements_ais if measurement.timestamp <= time]

            # remove the new measurements from the measurements lists
            for new_meas in new_measurements_ais:
                measurements_ais.remove(new_meas)
            for new_meas in new_measurements_radar:
                measurements_radar.remove(new_meas)

            # check that there is at most one measurement per sensor
            if len(new_measurements_ais) > 1 or len(new_measurements_radar) > 1:
                # raise exception
                raise Exception("More than one measurement per sensor per fusion rate")

            # for each sensor, perform a prediction
            prediction_radar = self.predictor_radar.predict(self.prior_radar, timestamp=time)
            prediction_ais = self.predictor_ais.predict(self.prior_ais, timestamp=time)
            # if a new AIS measurement
            if new_measurements_ais:
                measurement = new_measurements_ais[0]
                # calc updated estimate
                hypothesis = SingleHypothesis(prediction_ais, measurement)
                # calculate the Kalman gain
                hypothesis.measurement_prediction = self.updater_ais.predict_measurement(hypothesis.prediction,
                                                                                         measurement_model=self.measurement_model_ais)
                post_cov, kf_gain_ais = self.updater_ais._posterior_covariance(hypothesis)
                # get the transition model covar
                predict_over_interval = measurement.timestamp - self.prior_ais.timestamp
                # calc transition matrix
                transition_covar_ais = self.transition_model_ais.covar(time_interval=predict_over_interval)
                transition_matrix_ais = self.transition_model_ais.matrix(time_interval=predict_over_interval)
                # calc posterior
                post = self.updater_ais.update(hypothesis)
                # append posterior and update prior_ais
                tracks_ais.append(post)
                self.prior_ais = post
            else:
                # calc transition matrix and set kalman gain to 0
                # get the transition model covar
                predict_over_interval = time - self.prior_ais.timestamp
                # calc transition matrix
                transition_covar_ais = self.transition_model_ais.covar(time_interval=predict_over_interval)
                transition_matrix_ais = self.transition_model_ais.matrix(time_interval=predict_over_interval)
                # set kalman gain to 0
                kf_gain_ais = Matrix([[0, 0], [0, 0], [0, 0], [0, 0]])
                # append prediction and update prior_ais
                tracks_ais.append(prediction_ais)
                self.prior_ais = prediction_ais

            # if a new radar measurement
            if new_measurements_radar:
                measurement = new_measurements_radar[0]
                # calc updated estimate
                hypothesis = SingleHypothesis(prediction_radar, measurement)
                # calculate the Kalman gain
                hypothesis.measurement_prediction = self.updater_radar.predict_measurement(hypothesis.prediction,
                                                                                           measurement_model=self.measurement_model_radar)
                post_cov, kf_gain_radar = self.updater_radar._posterior_covariance(hypothesis)
                # get the transition model covar
                predict_over_interval = measurement.timestamp - self.prior_radar.timestamp
                # calc transition matrix
                transition_covar_radar = self.transition_model_radar.covar(time_interval=predict_over_interval)
                transition_matrix_radar = self.transition_model_radar.matrix(time_interval=predict_over_interval)
                # calc posterior
                post = self.updater_radar.update(hypothesis)
                # append posterior and update prior_radar
                self.prior_radar = post
            else:
                # calc transition matrix and set kalman gain to 0
                # get the transition model covar
                predict_over_interval = time - self.prior_radar.timestamp
                # calc transition matrix
                transition_covar_radar = self.transition_model_radar.covar(time_interval=predict_over_interval)
                transition_matrix_radar = self.transition_model_radar.matrix(time_interval=predict_over_interval)
                # set kalman gain to 0
                kf_gain_radar = Matrix([[0, 0], [0, 0], [0, 0], [0, 0]])
                # append prediction and update prior_radar
                self.prior_radar = prediction_radar

            # calculate the cross-covariance
            cross_cov_ij = calc_cross_cov_estimate_error(
                self.measurement_model_radar.matrix(), self.measurement_model_ais.matrix(), kf_gain_radar,
                kf_gain_ais, transition_matrix_radar, transition_covar_radar, cross_cov_ij
            )
            cross_cov_ji = calc_cross_cov_estimate_error(
                self.measurement_model_ais.matrix(), self.measurement_model_radar.matrix(), kf_gain_ais,
                kf_gain_radar, transition_matrix_ais, transition_covar_ais, cross_cov_ji
            )

            same_target = True  # ignore test for track association for now
            if same_target:
                fused_posterior, fused_covar = track_to_track_fusion.fuse_dependent_tracks(self.prior_radar,
                                                                                           self.prior_ais,
                                                                                           cross_cov_ij,
                                                                                           cross_cov_ji)
                estimate = GaussianState(fused_posterior, fused_covar, timestamp=time)
                tracks_fused.append(estimate)
                # try T2TFwoMpF
                # also have to update the cross-covariance
                cross_cov_ij = calc_partial_feedback_cross_cov(self.prior_radar, self.prior_ais, cross_cov_ij,
                                                               cross_cov_ji)
                cross_cov_ji = cross_cov_ij.copy().T  # right??
                # TEMPORARY: try to let prior radar become the fused result, i.e. partial feedback
                self.prior_radar = estimate
                # append to radar tracks
                tracks_radar.append(estimate)

            self.cross_cov_list.append(cross_cov_ij)
            time += timedelta(seconds=fusion_rate)
        return tracks_fused, tracks_radar, tracks_ais
Example #13
def test_alphabeta(measurement_model, prediction, measurement, alpha, beta):

    # Time delta
    timediff = timedelta(seconds=2)

    # Calculate evaluation variables - converts
    # to measurement from prediction space
    eval_measurement_prediction = StateMeasurementPrediction(
        measurement_model.matrix() @ prediction.state_vector)

    eval_posterior_position = prediction.state_vector[[0, 2]] + \
        alpha * (measurement.state_vector - eval_measurement_prediction.state_vector)
    eval_posterior_velocity = prediction.state_vector[[1, 3]] + \
        beta/timediff.total_seconds() * (measurement.state_vector -
                                         eval_measurement_prediction.state_vector)

    eval_state_vect = np.concatenate(
        (eval_posterior_position, eval_posterior_velocity))
    eval_posterior = State(eval_state_vect[[0, 2, 1, 3]])

    # Initialise an Alpha-Beta updater
    updater = AlphaBetaUpdater(measurement_model=measurement_model,
                               alpha=alpha,
                               beta=beta)

    # Get and assert measurement prediction
    measurement_prediction = updater.predict_measurement(prediction)

    assert (np.allclose(measurement_prediction.state_vector,
                        eval_measurement_prediction.state_vector,
                        0,
                        atol=1.e-14))

    # Perform and assert state update (without measurement prediction)
    posterior = updater.update(SingleHypothesis(prediction=prediction,
                                                measurement=measurement),
                               time_interval=timediff)

    assert (np.allclose(posterior.state_vector,
                        eval_posterior.state_vector,
                        0,
                        atol=1.e-14))
    assert (np.array_equal(posterior.hypothesis.prediction, prediction))
    assert (np.array_equal(posterior.hypothesis.measurement, measurement))
    assert (posterior.timestamp == prediction.timestamp)

    # Check that the vmap parameter can be set
    # Check a measurement prediction can be added
    updater.vmap = np.array([1, 3])
    posterior = updater.update(SingleHypothesis(
        prediction=prediction,
        measurement=measurement,
        measurement_prediction=measurement_prediction),
                               time_interval=timediff)
    assert (np.allclose(posterior.state_vector,
                        eval_posterior.state_vector,
                        0,
                        atol=1.e-14))

    # Finally check that no model in the updater raises correct error
    updater.measurement_model = None
    with pytest.raises(ValueError):
        updater._check_measurement_model(None)
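# For reference, the alpha-beta update being tested is a fixed-gain filter: the position receives
# alpha times the innovation and the velocity receives beta / dt times the innovation. A standalone
# NumPy sketch with made-up numbers (state ordered [x, vx, y, vy], position-only measurement):
import numpy as np

alpha, beta, dt = 0.9, 0.3, 2.0
x_pred = np.array([[10.0], [1.0], [5.0], [0.5]])
z = np.array([[10.4], [5.3]])

innovation = z - x_pred[[0, 2]]                    # measurement minus predicted position
x_post = x_pred.copy()
x_post[[0, 2]] = x_pred[[0, 2]] + alpha * innovation
x_post[[1, 3]] = x_pred[[1, 3]] + (beta / dt) * innovation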
    def track(self,
              start_time,
              measurements_radar,
              measurements_ais,
              fusion_rate=1):
        """
        returns fused tracks.
        """

        time = start_time

        tracks_radar = Track()
        tracks_ais = Track()
        tracks_fused = []

        measurements_radar = measurements_radar.copy()
        measurements_ais = measurements_ais.copy()
        # loop until there are no more measurements
        while measurements_radar or measurements_ais:
            # get all new measurements
            new_measurements_radar = \
                [measurement for measurement in measurements_radar if measurement.timestamp <= time]
            new_measurements_ais = \
                [measurement for measurement in measurements_ais if measurement.timestamp <= time]

            # remove the new measurements from the measurements lists
            for new_meas in new_measurements_ais:
                measurements_ais.remove(new_meas)
            for new_meas in new_measurements_radar:
                measurements_radar.remove(new_meas)

            # for each new_meas, perform a prediction and an update
            for measurement in new_measurements_ais:
                prediction = self.predictor_ais.predict(
                    self.prior_ais, timestamp=measurement.timestamp)
                hypothesis = SingleHypothesis(prediction, measurement)
                post = self.updater_ais.update(hypothesis)
                tracks_ais.append(post)
                self.prior_ais = tracks_ais[-1]
            for measurement in new_measurements_radar:
                prediction = self.predictor_radar.predict(
                    self.prior_radar, timestamp=measurement.timestamp)
                hypothesis = SingleHypothesis(prediction, measurement)
                post = self.updater_radar.update(hypothesis)
                tracks_radar.append(post)
                self.prior_radar = tracks_radar[-1]

            # perform a prediction up until this time (the newest measurement might not be at this exact time)
            # note that this "prediction" might be the updated posterior, if the newest measurement was at this time
            prediction_radar = self.predictor_radar.predict(self.prior_radar,
                                                            timestamp=time)
            prediction_ais = self.predictor_ais.predict(self.prior_ais,
                                                        timestamp=time)

            # fuse these predictions.
            tracks_fused.append(
                self._fuse_track(prediction_radar, prediction_ais))

            time += timedelta(seconds=fusion_rate)

        return tracks_fused, tracks_radar, tracks_ais
Example #15
    def track(self, measurements_radar, measurements_ais, estimation_rate=1):
        """
        Uses the Kalman Filter to fuse the measurements received. Produces a new estimate at each estimation_rate.
        A prediction is performed when no new measurements are received when a new estimate is calculated.

        Note: when estimation_rate is lower than either of the measurements rates, it might not use all measurements
        when updating.

        :param measurements_radar:
        :param measurements_ais:
        :param estimation_rate: How often a new estimate should be calculated.
        """
        time = self.start_time
        tracks_fused = Track()
        tracks_radar = Track()

        # copy measurements
        measurements_radar = measurements_radar.copy()
        measurements_ais = measurements_ais.copy()
        # loop until there are no more measurements
        while measurements_ais or measurements_radar:
            # get all new measurements
            new_measurements_radar = \
                [measurement for measurement in measurements_radar if measurement.timestamp <= time]
            new_measurements_ais = \
                [measurement for measurement in measurements_ais if measurement.timestamp <= time]

            # remove the new measurements from the measurements lists
            for new_meas in new_measurements_ais:
                measurements_ais.remove(new_meas)
            for new_meas in new_measurements_radar:
                measurements_radar.remove(new_meas)

            # sort the new measurements chronologically so they are processed in time order
            new_measurements_radar.sort(key=lambda meas: meas.timestamp)
            new_measurements_ais.sort(key=lambda meas: meas.timestamp)

            while new_measurements_radar or new_measurements_ais:
                if new_measurements_radar and \
                        (not new_measurements_ais or
                         new_measurements_radar[0].timestamp <= new_measurements_ais[0].timestamp):
                    # predict and update with radar measurement
                    new_measurement = new_measurements_radar[0]
                    prediction = self.predictor.predict(self.prior, timestamp=new_measurement.timestamp)
                    hypothesis = SingleHypothesis(prediction, new_measurement)
                    post = self.updater_radar.update(hypothesis)
                    tracks_radar.append(post)
                    # remove measurement
                    new_measurements_radar.remove(new_measurement)
                else:
                    # predict and update with AIS measurement
                    new_measurement = new_measurements_ais[0]
                    prediction = self.predictor.predict(self.prior, timestamp=new_measurement.timestamp)
                    hypothesis = SingleHypothesis(prediction, new_measurement)
                    post = self.updater_ais.update(hypothesis)
                    # remove measurement
                    new_measurements_ais.remove(new_measurement)

                # update the prior (it feeds the fused estimate below)
                self.prior = post

            # perform a prediction up until this time (the newest measurement might not be at this exact time)
            # note that this "prediction" might be the updated posterior, if the newest measurement was at this time
            prediction = self.predictor.predict(self.prior, timestamp=time)
            tracks_fused.append(GaussianState(prediction.mean, prediction.covar, prediction.timestamp))

            # increment time
            time += timedelta(seconds=estimation_rate)

        return tracks_fused, tracks_radar
# Running the Extended Kalman Filter

from stonesoup.types.state import GaussianState
prior = GaussianState([[0], [1], [0], [1]],
                      np.diag([1, 1, 1, 1]),
                      timestamp=start_time)

from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.track import Track

track = Track()
for measurement in measurements:
    prediction = predictor.predict(prior, timestamp=measurement.timestamp)
    hypothesis = SingleHypothesis(
        prediction,
        measurement)  # Used to group a prediction and measurement together
    post = updater.update(hypothesis)
    track.append(post)
    prior = track[-1]

# Plot the resulting track
ax.plot([state.state_vector[0, 0] for state in track],
        [state.state_vector[2, 0] for state in track],
        marker=".")
fig

from matplotlib.patches import Ellipse
HH = np.array([[1., 0., 0., 0.], [0., 0., 1., 0.]])
for state in track:
    w, v = np.linalg.eig(HH @ state.covar @ HH.T)
Example #17
detector1 = beamformers_2d.capon(data_file)
detector2 = beamformers_2d.rjmcmc(data_file)

from stonesoup.types.hypothesis import SingleHypothesis

from stonesoup.types.track import Track
track1 = Track()
track2 = Track()

print("Capon detections:")
for timestep, detections in detector1:
    for detection in detections:
        print(detection)
        prediction = predictor.predict(prior, timestamp=detection.timestamp)
        hypothesis = SingleHypothesis(
            prediction, detection)  # Group a prediction and measurement
        post = updater.update(hypothesis)
        track1.append(post)
        prior = track1[-1]

print("RJMCMC detections:")
for timestep, detections in detector2:
    for detection in detections:
        print(detection)
        prediction = predictor.predict(prior, timestamp=detection.timestamp)
        hypothesis = SingleHypothesis(
            prediction, detection)  # Group a prediction and measurement
        post = updater.update(hypothesis)
        track2.append(post)
        prior = track2[-1]
Example #18
def test_track_metadata():
    track = Track()
    assert track.metadata == {}
    assert not track.metadatas

    track = Track(init_metadata={'colour': 'blue'})

    assert track.metadata == {'colour': 'blue'}
    assert not track.metadatas

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'side': 'ally'}))
    )
    track.append(state)
    assert track.metadata == {'colour': 'blue', 'side': 'ally'}
    assert len(track.metadatas) == 1
    assert track.metadata == track.metadatas[-1]

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'side': 'enemy'}))
    )
    track.append(state)
    assert track.metadata == {'colour': 'blue', 'side': 'enemy'}
    assert len(track.metadatas) == 2

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'colour': 'red'}))
    )
    track[0] = state
    assert track.metadata == track.metadatas[-1] == {'colour': 'red', 'side': 'enemy'}
    assert len(track.metadatas) == 2
    assert track.metadatas[0] == {'colour': 'red'}

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'speed': 'fast'}))
    )
    track.insert(1, state)
    assert track.metadata == {'colour': 'red', 'side': 'enemy', 'speed': 'fast'}
    assert len(track.metadatas) == 3
    assert track.metadatas[0] == {'colour': 'red'}
    assert track.metadatas[1] == {'colour': 'red', 'speed': 'fast'}
    assert track.metadatas[2] == {'colour': 'red', 'side': 'enemy', 'speed': 'fast'}

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'size': 'small'}))
    )
    track.insert(-1, state)
    assert track.metadata == {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert len(track.metadatas) == 4
    assert track.metadatas[0] == {'colour': 'red'}
    assert track.metadatas[1] == {'colour': 'red', 'speed': 'fast'}
    assert track.metadatas[2] == {'colour': 'red', 'speed': 'fast', 'size': 'small'}
    assert track.metadatas[3] == \
           {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'colour': 'black'}))
    )
    track.insert(-100, state)
    assert track.metadata == {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert len(track.metadatas) == 5
    assert track.metadatas[0] == {'colour': 'black'}
    assert track.metadatas[1] == {'colour': 'red'}
    assert track.metadatas[2] == {'colour': 'red', 'speed': 'fast'}
    assert track.metadatas[3] == {'colour': 'red', 'size': 'small', 'speed': 'fast'}
    assert track.metadatas[4] == \
           {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'colour': 'black'}))
    )
    track.insert(100, state)
    assert track.metadata == {'colour': 'black', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert len(track.metadatas) == 6
    assert track.metadatas[0] == {'colour': 'black'}
    assert track.metadatas[1] == {'colour': 'red'}
    assert track.metadatas[2] == {'colour': 'red', 'speed': 'fast'}
    assert track.metadatas[3] == {'colour': 'red', 'size': 'small', 'speed': 'fast'}
    assert track.metadatas[4] == \
           {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert track.metadatas[5] == \
           {'colour': 'black', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'colour': 'green'}))
    )
    track.append(state)
    assert track.metadata == {'colour': 'green', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert len(track.metadatas) == 7
    assert track.metadatas[0] == {'colour': 'black'}
    assert track.metadatas[1] == {'colour': 'red'}
    assert track.metadatas[2] == {'colour': 'red', 'speed': 'fast'}
    assert track.metadatas[3] == {'colour': 'red', 'size': 'small', 'speed': 'fast'}
    assert track.metadatas[4] == \
           {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert track.metadatas[5] == \
           {'colour': 'black', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert track.metadatas[6] == \
           {'colour': 'green', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}

    state = Update(
        hypothesis=SingleHypothesis(None, Detection(np.array([[0]]), metadata={'colour': 'white'}))
    )
    track[-2] = state
    assert track.metadata == {'colour': 'green', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert len(track.metadatas) == 7
    assert track.metadatas[0] == {'colour': 'black'}
    assert track.metadatas[1] == {'colour': 'red'}
    assert track.metadatas[2] == {'colour': 'red', 'speed': 'fast'}
    assert track.metadatas[3] == {'colour': 'red', 'size': 'small', 'speed': 'fast'}
    assert track.metadatas[4] == \
           {'colour': 'red', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert track.metadatas[5] == \
           {'colour': 'white', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
    assert track.metadatas[6] == \
           {'colour': 'green', 'side': 'enemy', 'speed': 'fast', 'size': 'small'}
Example #19
measurement_model = SDFMessmodell(
    4,  # dimensions (position and velocity in 2D)
    (0, 2),  # mapping
)
updater = SDFUpdater(measurement_model)
"""Erstellen eines Anfangszustandes"""
prior = GaussianState([[0.0], [0.0], [0.0], [0.0]],
                      np.diag([0.0, 0.0, 0.0, 0.0]),
                      timestamp=0)
"""Erstellen einer Trajektorie, sodass das Filter arbeiten kann"""
track = Track()

for measurement in measurements:
    prediction = predictor.predict(prior, timestamp=measurement.timestamp)

    hypothesis = SingleHypothesis(prediction, measurement)

    post = updater.update(hypothesis, measurement_model)

    track.append(post)

    prior = track[-1]

# Plot
ax.plot([state.state_vector[0] for state in track],
        [state.state_vector[2] for state in track],
        marker=".",
        color="yellow")
"""Darstellen der Kovarianz durch Ellipsen"""
for state in track:
    w, v = np.linalg.eig(measurement_model.matrix() @ state.covar
                         @ measurement_model.matrix().T)
Example #20
    def _smooth_traj(self, traj, process_noise_std=0.5, measurement_noise_std=1):
        # Get detector
        detector = self._get_detector(traj)

        # Models
        if not isinstance(process_noise_std, (list, tuple, np.ndarray)):
            process_noise_std = [process_noise_std, process_noise_std]
        if not isinstance(measurement_noise_std, (list, tuple, np.ndarray)):
            measurement_noise_std = [measurement_noise_std, measurement_noise_std]
        transition_model = CombinedLinearGaussianTransitionModel(
            [
                ConstantVelocity(process_noise_std[0] ** 2),
                ConstantVelocity(process_noise_std[1] ** 2),
            ]
        )
        measurement_model = LinearGaussian(
            ndim_state=4,
            mapping=[0, 2],
            noise_covar=np.diag(
                [measurement_noise_std[0] ** 2, measurement_noise_std[1] ** 2]
            ),
        )
        # Predictor and updater
        predictor = KalmanPredictor(transition_model)
        updater = KalmanUpdater(measurement_model)
        # Initiator
        state_vector = StateVector([0.0, 0.0, 0.0, 0.0])
        covar = CovarianceMatrix(np.diag([0.0, 0.0, 0.0, 0.0]))
        prior_state = GaussianStatePrediction(state_vector, covar)
        initiator = SimpleMeasurementInitiator(prior_state, measurement_model)
        # Filtering
        track = None
        for i, (timestamp, detections) in enumerate(detector):
            if i == 0:
                tracks = initiator.initiate(detections, timestamp)
                track = tracks.pop()
            else:
                detection = detections.pop()
                prediction = predictor.predict(track.state, timestamp=timestamp)
                hypothesis = SingleHypothesis(prediction, detection)
                posterior = updater.update(hypothesis)
                track.append(posterior)
        # Smoothing
        smoother = KalmanSmoother(transition_model)
        smooth_track = smoother.smooth(track)

        # Create new trajectory
        if traj.is_latlon:
            df = traj.df.to_crs("EPSG:3395")
            df.geometry = [
                Point(state.state_vector[0], state.state_vector[2])
                for state in smooth_track
            ]
            df.to_crs(traj.crs, inplace=True)
        else:
            df = traj.df.copy()
            df.geometry = [
                Point(state.state_vector[0], state.state_vector[2])
                for state in smooth_track
            ]
        new_traj = Trajectory(df, traj.id)
        return new_traj
transition_covars_radar = []
transition_covars_ais = []

# create list for storing transition matrices
transition_matrixes_radar = []
transition_matrixes_ais = []

# create list for storing tracks
tracks_radar = Track()
tracks_ais = Track()

# track
for measurement in measurements_radar:
    prediction = predictor_radar.predict(prior_radar,
                                         timestamp=measurement.timestamp)
    hypothesis = SingleHypothesis(prediction, measurement)
    # calculate the kalman gain
    hypothesis.measurement_prediction = updater_radar.predict_measurement(
        hypothesis.prediction, measurement_model=measurement_model_radar)
    post_cov, kalman_gain = updater_radar._posterior_covariance(hypothesis)
    kf_gains_radar.append(kalman_gain)
    # get the transition model covar (per the note in the class-based example above, the model is the
    # same for AIS and radar, so the _ais names below are not a bug)
    predict_over_interval = measurement.timestamp - prior_radar.timestamp
    transition_covars_ais.append(
        transition_model_ais.covar(time_interval=predict_over_interval))
    transition_matrixes_ais.append(
        transition_model_ais.matrix(time_interval=predict_over_interval))
    # update
    post = updater_radar.update(hypothesis)
    tracks_radar.append(post)
    prior_radar = tracks_radar[-1]
Example #22
def test_phd_single_component_update(UpdaterClass, measurement_model,
                                     prediction, measurement):
    eval_measurement_prediction = GaussianMeasurementPrediction(
        measurement_model.matrix() @ prediction.mean,
        measurement_model.matrix() @ prediction.covar
        @ measurement_model.matrix().T + measurement_model.covar(),
        cross_covar=prediction.covar @ measurement_model.matrix().T)
    kalman_gain = eval_measurement_prediction.cross_covar @ np.linalg.inv(
        eval_measurement_prediction.covar)
    eval_posterior = GaussianState(
        prediction.mean + kalman_gain
        @ (measurement.state_vector - eval_measurement_prediction.mean),
        prediction.covar -
        kalman_gain @ eval_measurement_prediction.covar @ kalman_gain.T)

    underlying_updater = UpdaterClass(measurement_model=measurement_model)
    measurement_prediction = underlying_updater.predict_measurement(prediction)

    phd_updater = PHDUpdater(updater=underlying_updater, prob_detection=0.9)
    hypotheses = [
        MultipleHypothesis(
            [SingleHypothesis(prediction=prediction,
                              measurement=measurement)]),
        MultipleHypothesis(
            [SingleHypothesis(prediction=prediction, measurement=None)])
    ]

    updated_mixture = phd_updater.update(hypotheses)
    # One for updated component, one for missed detection
    assert len(updated_mixture) == 2
    # Check updated component
    updated_component = updated_mixture[0]
    assert (np.allclose(updated_component.mean,
                        eval_posterior.mean,
                        0,
                        atol=1.e-14))
    assert (np.allclose(updated_component.covar,
                        eval_posterior.covar,
                        0,
                        atol=1.e-14))
    assert (updated_component.timestamp == measurement.timestamp)
    prob_detection = 0.9
    prob_survival = 1
    q = multivariate_normal.pdf(measurement.state_vector.flatten(),
                                mean=measurement_prediction.mean.flatten(),
                                cov=measurement_prediction.covar)
    clutter_density = 1e-26
    new_weight = (prob_detection*prediction.weight*q*prob_survival) / \
        ((prob_detection*prediction.weight*q*prob_survival)+clutter_density)
    assert (updated_component.weight == new_weight)
    # Check miss detected component
    miss_detected_component = updated_mixture[1]
    assert (np.allclose(miss_detected_component.mean,
                        prediction.mean,
                        0,
                        atol=1.e-14))
    assert (np.allclose(miss_detected_component.covar,
                        prediction.covar,
                        0,
                        atol=1.e-14))
    assert (miss_detected_component.timestamp == prediction.timestamp)
    l1 = 1
    assert (miss_detected_component.weight == prediction.weight *
            (1 - prob_detection) * l1)
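# The weight bookkeeping asserted above follows the standard GM-PHD update for a single component with
# unit survival probability: a detected component is weighted by p_D * w * q(z), normalised by that same
# term plus the clutter density, while a missed component keeps w * (1 - p_D). A tiny numeric sketch
# with arbitrary values:
prob_detection = 0.9
prior_weight = 0.6
q = 0.05               # likelihood of the detection under the measurement prediction
clutter_density = 1e-26

detected_weight = (prob_detection * prior_weight * q) / \
                  (prob_detection * prior_weight * q + clutter_density)
missed_weight = prior_weight * (1 - prob_detection)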