Code Example #1
    def covar(self, timedelta=5, **kwargs):
        # time step (seconds) and acceleration noise standard deviation
        delta_t = timedelta
        Sigma = 5.0

        covar = np.array([[np.power(delta_t, 4) / 4,
                           np.power(delta_t, 3) / 2],
                          [np.power(delta_t, 3) / 2,
                           np.power(delta_t, 2)]]) * np.power(Sigma, 2)

        # tile the per-dimension block across the x and y dimensions
        covar = block_diag(covar, covar)

        return CovarianceMatrix(covar)
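For reference, the 2x2 block above is the standard discrete white-noise-acceleration
process noise for a single Cartesian dimension, which `block_diag` then tiles across
x and y:

    Q = \sigma^2
        \begin{bmatrix}
        \Delta t^4 / 4 & \Delta t^3 / 2 \\
        \Delta t^3 / 2 & \Delta t^2
        \end{bmatrix}

with \Delta t the time step and \sigma = 5.0 the acceleration standard deviation.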
Code Example #2
import datetime

import numpy as np

from stonesoup.models.transition.linear import (
    CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.simulator.simple import (
    MultiTargetGroundTruthSimulator, SimpleDetectionSimulator)
from stonesoup.types.array import StateVector, CovarianceMatrix
from stonesoup.types.state import GaussianState
from stonesoup.updater.kalman import KalmanUpdater

# Models
transition_model = CombinedLinearGaussianTransitionModel(
    [ConstantVelocity(1), ConstantVelocity(1)], seed=1)

measurement_model = LinearGaussian(4, [0, 2], np.diag([0.5, 0.5]), seed=2)

# Simulators
groundtruth_sim = MultiTargetGroundTruthSimulator(
    transition_model=transition_model,
    initial_state=GaussianState(
        StateVector([[0], [0], [0], [0]]),
        CovarianceMatrix(np.diag([1000, 10, 1000, 10]))),
    timestep=datetime.timedelta(seconds=5),
    number_steps=100,
    birth_rate=0.2,
    death_probability=0.05,
    seed=3
)
detection_sim = SimpleDetectionSimulator(
    groundtruth=groundtruth_sim,
    measurement_model=measurement_model,
    meas_range=np.array([[-1, 1], [-1, 1]]) * 5000,  # Area to generate clutter
    detection_probability=0.9,
    clutter_rate=1,
    seed=4
)
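A minimal consumption sketch: Stone Soup detection simulators are iterable detectors,
with each step yielding a timestamp and the set of detections (true detections plus
clutter) generated at that time.

for timestamp, detections in detection_sim:
    print(timestamp, len(detections))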
Code Example #3
    def covar(self):
        # isotropic measurement noise with standard deviation sigma = 50
        sigma = 50
        cov = CovarianceMatrix([[np.power(sigma, 2), 0],
                                [0, np.power(sigma, 2)]])
        return cov
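Written out, this is simply an isotropic measurement noise covariance:

    R = \sigma^2 I_2, \qquad \sigma = 50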
Code Example #4
reducer = GaussianMixtureReducer(prune_threshold=prune_threshold,
                                 pruning=True,
                                 merge_threshold=merge_threshold,
                                 merging=True)

# %%
# Now we initialize the Gaussian mixture at time k=0. In this implementation, the GM-PHD
# tracker knows the start state of the first 3 tracks that were created. After that it
# must pick up new tracks and discard old ones. It is not necessary to provide the
# tracker with these start states; you can simply define `tracks` as an empty set
# (a sketch of that alternative follows the loop below).
#
# Feel free to change the `state_vector` from the actual truth state vector to
# something else. This would mimic a tracker that is unsure of where the objects
# originate.
from stonesoup.types.state import TaggedWeightedGaussianState
from stonesoup.types.track import Track
from stonesoup.types.array import CovarianceMatrix
covar = CovarianceMatrix(np.diag([10, 5, 10, 5]))

tracks = set()
for truth in start_truths:
    new_track = TaggedWeightedGaussianState(state_vector=truth.state_vector,
                                            covar=covar**2,
                                            weight=0.25,
                                            tag='birth',
                                            timestamp=start_time)
    tracks.add(Track(new_track))
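
# %%
# As referenced above, a sketch of the uninformed alternative: start from an
# empty Gaussian mixture and let the tracker initiate every track itself
# (commented out so it does not discard the tracks built above):
#
#     tracks = set()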

# %%
# The hypothesiser takes the current Gaussian mixture as a parameter. Here we
# initialize it for use later.
reduced_states = set([track[-1] for track in tracks])
Code Example #5
    def _smooth_traj(self, traj, process_noise_std=0.5, measurement_noise_std=1):
        # Get detector
        detector = self._get_detector(traj)

        # Models
        if not isinstance(process_noise_std, (list, tuple, np.ndarray)):
            process_noise_std = [process_noise_std, process_noise_std]
        if not isinstance(measurement_noise_std, (list, tuple, np.ndarray)):
            measurement_noise_std = [measurement_noise_std, measurement_noise_std]
        transition_model = CombinedLinearGaussianTransitionModel(
            [
                ConstantVelocity(process_noise_std[0] ** 2),
                ConstantVelocity(process_noise_std[1] ** 2),
            ]
        )
        measurement_model = LinearGaussian(
            ndim_state=4,
            mapping=[0, 2],
            noise_covar=np.diag(
                [measurement_noise_std[0] ** 2, measurement_noise_std[1] ** 2]
            ),
        )
        # Predictor and updater
        predictor = KalmanPredictor(transition_model)
        updater = KalmanUpdater(measurement_model)
        # Initiator: placeholder prior; the measured (position) dimensions are
        # overwritten by the first detection, while the unmeasured (velocity)
        # dimensions keep this zero-mean, zero-variance prior
        state_vector = StateVector([0.0, 0.0, 0.0, 0.0])
        covar = CovarianceMatrix(np.diag([0.0, 0.0, 0.0, 0.0]))
        prior_state = GaussianStatePrediction(state_vector, covar)
        initiator = SimpleMeasurementInitiator(prior_state, measurement_model)
        # Filtering
        track = None
        for i, (timestamp, detections) in enumerate(detector):
            if i == 0:
                tracks = initiator.initiate(detections, timestamp)
                track = tracks.pop()
            else:
                # assumes exactly one detection is available per timestep
                detection = detections.pop()
                prediction = predictor.predict(track.state, timestamp=timestamp)
                hypothesis = SingleHypothesis(prediction, detection)
                posterior = updater.update(hypothesis)
                track.append(posterior)
        # Smoothing
        smoother = KalmanSmoother(transition_model)
        smooth_track = smoother.smooth(track)

        # Create new trajectory
        if traj.is_latlon:
            df = traj.df.to_crs("EPSG:3395")
            df.geometry = [
                Point(state.state_vector[0], state.state_vector[2])
                for state in smooth_track
            ]
            df.to_crs(traj.crs, inplace=True)
        else:
            df = traj.df.copy()
            df.geometry = [
                Point(state.state_vector[0], state.state_vector[2])
                for state in smooth_track
            ]
        new_traj = Trajectory(df, traj.id)
        return new_traj
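For reference, the `KalmanSmoother` used at the end implements a
Rauch-Tung-Striebel-type backward pass over the filtered track:

    G_k = P_{k|k} F^\top P_{k+1|k}^{-1}
    \hat{x}_{k|N} = \hat{x}_{k|k} + G_k (\hat{x}_{k+1|N} - \hat{x}_{k+1|k})
    P_{k|N} = P_{k|k} + G_k (P_{k+1|N} - P_{k+1|k}) G_k^\top

with F the transition matrix and N the index of the final timestep.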
Code Example #6
#           R = \begin{bmatrix}
#             \sigma_{\theta}^2 & 0 & 0 \\
#             0 & \sigma_{\phi}^2 & 0 \\
#             0 & 0 & \sigma_{r}^2
#             \end{bmatrix}
#
# We now create our radar.

# Import a radar sensor model
from stonesoup.sensor.radar.radar import RadarElevationBearingRange

# First we need to configure a radar

# Generate a radar sensor with a suitable measurement accuracy
noise_covar = CovarianceMatrix(
    np.diag([np.deg2rad(3)**2,
             np.deg2rad(0.15)**2,
             25**2]))
# This radar measures elevation to +/- 3 degrees, bearing to +/- 0.15 degrees
# and range to +/- 25 m (the diagonal entries are the corresponding variances)

# The radar needs to be informed of where x, y, and z are in the target state space
radar_mapping = (0, 2, 4)

# Instantiate the radar
radar = RadarElevationBearingRange(ndim_state=6,
                                   position_mapping=radar_mapping,
                                   noise_covar=noise_covar)
# %%
# Attach the sensor to the platform
# ---------------------------------
# Now that we have created our radar sensor we need to mount the sensor onto the platform we have previously created.
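# A sketch of that mounting step, assuming `platform` is the platform object
# built earlier in the tutorial (not shown in this excerpt):
platform.add_sensor(radar)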
Code Example #7
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.

# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate

# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
    np.array([np.deg2rad(3),  # Elevation
              np.deg2rad(3),  # Bearing
              100.,  # Range
              25.])))  # Range Rate

# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0])  # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])

# Mount the radar onto the platform

radar = RadarElevationBearingRangeRate(ndim_state=6,
                                       position_mapping=(0, 2, 4),
                                       velocity_mapping=(1, 3, 5),
                                       noise_covar=radar_noise_covar,
                                       mounting_offset=radar_mounting_offsets,
                                       rotation_offset=radar_rotation_offsets)
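
# %%
# The PassiveElevationBearing sensor mentioned above does not appear in this
# excerpt; a hedged sketch of how it might be configured (the noise values and
# offsets here are illustrative, not taken from the original demo):
from stonesoup.sensor.passive import PassiveElevationBearing

imager_noise_covar = CovarianceMatrix(np.diag([np.deg2rad(0.05),    # Elevation
                                               np.deg2rad(0.05)]))  # Bearing
imager = PassiveElevationBearing(ndim_state=6,
                                 mapping=(0, 2, 4),
                                 noise_covar=imager_noise_covar,
                                 mounting_offset=StateVector([0, 0, 1]),
                                 rotation_offset=StateVector([0, 0, 0]))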
Code Example #8
# %%
# Separate out the imports
import numpy as np
import datetime

# %%
# Initialise ground truth
# ^^^^^^^^^^^^^^^^^^^^^^^
# Here are some configurable parameters associated with the ground truth, e.g. where
# tracks are born, the rate at which they are born, and their death probability. This
# follows similar logic to the code in the previous tutorial section
# :ref:`auto_tutorials/09_Initiators_&_Deleters:Simulating Multiple Targets`.
from stonesoup.types.array import StateVector, CovarianceMatrix
from stonesoup.types.state import GaussianState
initial_state_mean = StateVector([[0], [0], [0], [0]])
initial_state_covariance = CovarianceMatrix(np.diag([4, 0.5, 4, 0.5]))
timestep_size = datetime.timedelta(seconds=5)
number_of_steps = 20
birth_rate = 0.3
death_probability = 0.05
initial_state = GaussianState(initial_state_mean, initial_state_covariance)

# %%
# Create the transition model - default set to 2d nearly-constant velocity with small (0.05)
# variance.
from stonesoup.models.transition.linear import (
    CombinedLinearGaussianTransitionModel, ConstantVelocity)
transition_model = CombinedLinearGaussianTransitionModel(
    [ConstantVelocity(0.05), ConstantVelocity(0.05)])
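
# %%
# A sketch of wiring these parameters into the ground truth simulator used by
# this tutorial (the construction mirrors the simulator's documented parameters):
from stonesoup.simulator.simple import MultiTargetGroundTruthSimulator
groundtruth_sim = MultiTargetGroundTruthSimulator(
    transition_model=transition_model,
    initial_state=initial_state,
    timestep=timestep_size,
    number_steps=number_of_steps,
    birth_rate=birth_rate,
    death_probability=death_probability)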

# %%
Code Example #9
reducer = GaussianMixtureReducer(prune_threshold=prune_threshold,
                                 pruning=True,
                                 merge_threshold=merge_threshold,
                                 merging=True)

# %%
# Now we initialize the Gaussian mixture at time k=0. In this implementation, the GM-PHD
# tracker knows the start state of the first 3 tracks that were created. After that it
# must pick up new tracks and discard old ones.
#
# Feel free to change the `state_vector` from the actual truth state vector to
# something else. This would mimic a tracker that does not know where the objects
# originate. A sketch of that variant follows the loop below.
from stonesoup.types.state import TaggedWeightedGaussianState
from stonesoup.types.track import Track
from stonesoup.types.array import CovarianceMatrix
covar = CovarianceMatrix(np.diag([10, 5, 10, 5]))

tracks = set()
for truth in start_truths:
    new_track = TaggedWeightedGaussianState(state_vector=truth.state_vector,
                                            covar=covar**2,
                                            weight=0.25,
                                            tag='birth',
                                            timestamp=start_time)
    tracks.add(Track(new_track))
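
# %%
# As referenced above, a sketch of the "uncertain origin" variant: offset each
# birth state away from the truth (the offset values are illustrative only).
from stonesoup.types.array import StateVector
offset = StateVector([[50.], [0.], [50.], [0.]])
uncertain_tracks = set()
for truth in start_truths:
    uncertain_tracks.add(Track(TaggedWeightedGaussianState(
        state_vector=truth.state_vector + offset,
        covar=covar**2,
        weight=0.25,
        tag='birth',
        timestamp=start_time)))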

reduced_states = None

# %%
# Run the Tracker
# ^^^^^^^^^^^^^^^
Code Example #10
#   tutorials.

# %%
# Track Initiation
# ****************
# For initialising tracks we will use a :class:`~.MultiMeasurementInitiator`, which allows our
# tracker to tentatively initiate tracks from unassociated measurements, and hold them within the
# initiator until they have survived for at least 10 frames. We also define a
# :class:`~.UpdateTimeStepsDeleter` deleter to be used by the initiator to delete tentative tracks
# that have not been associated to a measurement in the last 3 frames.
from stonesoup.types.state import GaussianState
from stonesoup.types.array import CovarianceMatrix, StateVector
from stonesoup.initiator.simple import MultiMeasurementInitiator
from stonesoup.deleter.time import UpdateTimeStepsDeleter
prior_state = GaussianState(StateVector(np.zeros((6,1))),
                            CovarianceMatrix(np.diag([100**2, 30**2, 100**2, 30**2, 100**2, 100**2])))
deleter_init = UpdateTimeStepsDeleter(time_steps_since_update=3)
initiator = MultiMeasurementInitiator(prior_state, measurement_model, deleter_init,
                                      data_associator, updater, min_points=10)

# %%
# Track Deletion
# **************
# For confirmed tracks we again use a :class:`~.UpdateTimeStepsDeleter`, but this time
# configured to delete tracks that have not been associated to a measurement in the
# last 15 frames.
deleter = UpdateTimeStepsDeleter(time_steps_since_update=15)
# %%
# .. note::
#
#   For more information on the above classes and how they operate you can refer to
#   the Stone Soup
#   `Initiators & Deleters <https://stonesoup.readthedocs.io/en/latest/auto_tutorials/09_Initiators_&_Deleters.html>`_
#   tutorial.
Code Example #11
    def detections_gen(self):
        detections = set()
        current_time = datetime.now()

        num_samps = 1000000
        d = 10
        omega = 50
        fs = 20000
        l = 1  # expected number of targets

        window = 20000
        windowm1 = window - 1

        y = np.loadtxt(self.csv_path, delimiter=',')

        L = len(y)

        N = 9 * window

        max_targets = 5

        nbins = 128

        bin_steps = [(math.pi + 0.1) / (2 * nbins), 2 * math.pi / nbins]

        scans = []

        winstarts = np.linspace(0, L - window, num=int(L / window), dtype=int)

        for win in winstarts:
            # initialise histograms
            param_hist = np.zeros([max_targets, nbins, nbins])
            order_hist = np.zeros([max_targets])

            # initialise params
            p_params = np.empty([max_targets, 2])
            noise = noise_proposal(0)
            [params, K] = proposal([], 0, p_params)

            # calculate sinTy and cosTy
            sinTy = np.zeros([9])
            cosTy = np.zeros([9])

            alpha = np.zeros([9])

            yTy = 0

            for k in range(0, 9):
                for t in range(0, window):
                    sinTy[k] = sinTy[k] + math.sin(
                        2 * math.pi * t * omega / fs) * y[t + win, k]
                    cosTy[k] = cosTy[k] + math.cos(
                        2 * math.pi * t * omega / fs) * y[t + win, k]
                    yTy = yTy + y[t + win, k] * y[t + win, k]

            sumsinsq = 0
            sumcossq = 0
            sumsincos = 0

            for t in range(0, window):
                sumsinsq = sumsinsq + math.sin(
                    2 * math.pi * t * omega / fs) * math.sin(
                        2 * math.pi * t * omega / fs)
                sumcossq = sumcossq + math.cos(
                    2 * math.pi * t * omega / fs) * math.cos(
                        2 * math.pi * t * omega / fs)
                sumsincos = sumsincos + math.sin(
                    2 * math.pi * t * omega / fs) * math.cos(
                        2 * math.pi * t * omega / fs)

            old_logp = calc_acceptance(noise, params, K, omega, 1, d, y,
                                       window, sinTy, cosTy, yTy, alpha,
                                       sumsinsq, sumcossq, sumsincos, N, l)

            n = 0

            while n < num_samps:
                p_noise = noise_proposal(noise)
                [p_params, p_K,
                 Qratio] = proposal_func(params, K, p_params, max_targets)
                if p_K != 0:
                    new_logp = calc_acceptance(p_noise, p_params, p_K, omega,
                                               1, d, y, window, sinTy, cosTy,
                                               yTy, alpha, sumsinsq, sumcossq,
                                               sumsincos, N, l)
                    logA = new_logp - old_logp + np.log(Qratio)
                    # do a Metropolis-Hastings step
                    if logA > np.log(random.uniform(0, 1)):
                        old_logp = new_logp
                        params = copy.deepcopy(p_params)
                        K = copy.deepcopy(p_K)
                    for k in range(0, K):
                        bin_ind = [0, 0]
                        # loop variable renamed from `l` so it does not shadow
                        # the expected-number-of-targets constant defined above
                        for dim in range(0, 2):
                            edge = bin_steps[dim]
                            while edge < params[k, dim]:
                                edge += bin_steps[dim]
                                bin_ind[dim] += 1
                                if bin_ind[dim] == nbins - 1:
                                    break
                        param_hist[K - 1, bin_ind[0], bin_ind[1]] += 1
                    order_hist[K - 1] += 1
                    n += 1

            # look for peaks in histograms
            max_peak = 0
            max_ind = 0
            for ind in range(0, max_targets):
                if order_hist[ind] > max_peak:
                    max_peak = order_hist[ind]
                    max_ind = ind

            # FOR TESTING PURPOSES ONLY - SET max_ind = 0
            max_ind = 0

            # look for largest N peaks, where N corresponds to peak in the order histogram
            # use divide-and-conquer quadrant-based approach
            if max_ind == 0:
                [unique_peak_inds1, unique_peak_inds2
                 ] = np.unravel_index(param_hist[0, :, :].argmax(),
                                      param_hist[0, :, :].shape)
                num_peaks = 1
            else:
                order_ind = max_ind - 1
                quadrant_factor = 2
                nstart = 0
                mstart = 0
                nend = quadrant_factor
                mend = quadrant_factor
                peak_inds1 = [None] * 16
                peak_inds2 = [None] * 16
                k = 0
                while quadrant_factor < 32:
                    max_quadrant = 0
                    quadrant_size = nbins / quadrant_factor
                    for n in range(nstart, nend):
                        for m in range(mstart, mend):
                            [ind1, ind2] = np.unravel_index(
                                param_hist[order_ind,
                                           int(n * quadrant_size):int(
                                               (n + 1) * quadrant_size - 1),
                                           int(m * quadrant_size):int(
                                               (m + 1) * quadrant_size -
                                               1)].argmax(),
                                param_hist[order_ind,
                                           int(n * quadrant_size):int(
                                               (n + 1) * quadrant_size - 1),
                                           int(m * quadrant_size):int(
                                               (m + 1) * quadrant_size -
                                               1)].shape)
                            peak_inds1[k] = int(ind1 + n * quadrant_size)
                            peak_inds2[k] = int(ind2 + m * quadrant_size)
                            if param_hist[order_ind, peak_inds1[k],
                                          peak_inds2[k]] > max_quadrant:
                                max_quadrant = param_hist[order_ind,
                                                          peak_inds1[k],
                                                          peak_inds2[k]]
                                max_ind1 = n
                                max_ind2 = m
                            k += 1
                    quadrant_factor = 2 * quadrant_factor
                    # on next loop look for other peaks in the quadrant containing the highest peak
                    nstart = 2 * max_ind1
                    mstart = 2 * max_ind2
                    nend = 2 * (max_ind1 + 1)
                    mend = 2 * (max_ind2 + 1)

                # determine unique peaks
                unique_peak_inds1 = [None] * 16
                unique_peak_inds2 = [None] * 16
                unique_peak_inds1[0] = peak_inds1[0]
                unique_peak_inds2[0] = peak_inds2[0]
                num_peaks = 1
                for n in range(0, 16):
                    flag_unique = 1
                    for k in range(0, num_peaks):
                        # check if peak is close to any other known peaks
                        # (absolute differences, so the comparison order does not matter)
                        if abs(unique_peak_inds1[k] - peak_inds1[n]) < 2:
                            if abs(unique_peak_inds2[k] - peak_inds2[n]) < 2:
                                # part of same peak (check if bin is taller)
                                if param_hist[order_ind, peak_inds1[n],
                                              peak_inds2[n]] > param_hist[
                                                  order_ind,
                                                  unique_peak_inds1[k],
                                                  unique_peak_inds2[k]]:
                                    unique_peak_inds1[k] = peak_inds1[n]
                                    unique_peak_inds2[k] = peak_inds2[n]
                                flag_unique = 0
                                break
                    if flag_unique == 1:
                        unique_peak_inds1[num_peaks] = peak_inds1[n]
                        unique_peak_inds2[num_peaks] = peak_inds2[n]
                        num_peaks += 1

            # Defining a detection
            state_vector = StateVector([
                unique_peak_inds2 * bin_steps[1],
                unique_peak_inds1 * bin_steps[0]
            ])  # [Azimuth, Elevation]
            covar = CovarianceMatrix(np.array([[1, 0],
                                               [0, 1]]))  # [[AA, AE],[AE, EE]]
            measurement_model = LinearGaussian(ndim_state=4,
                                               mapping=[0, 2],
                                               noise_covar=covar)
            current_time = current_time + timedelta(milliseconds=window)
            detection = Detection(state_vector,
                                  timestamp=current_time,
                                  measurement_model=measurement_model)
            detections = set([detection])

            scans.append((current_time, detections))

        # For every timestep
        for scan in scans:
            yield scan[0], scan[1]
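For reference, the accept/reject test above (`logA > np.log(random.uniform(0, 1))`)
is the standard Metropolis-Hastings step in log space:

    \log A = \log p(\theta' \mid y) - \log p(\theta \mid y)
             + \log \frac{q(\theta \mid \theta')}{q(\theta' \mid \theta)}

accepting the proposal \theta' when \log A > \log u with u \sim \mathcal{U}(0, 1),
which matches `new_logp - old_logp + np.log(Qratio)` in the code.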
Code Example #12
    def detections_gen(self):
        detections = set()
        current_time = datetime.now()

        y = np.loadtxt(self.csv_path, delimiter=',')

        L = len(y)

        # frequency of sinusoidal signal
        omega = 50

        window = 20000
        windowm1 = window - 1

        thetavals = np.linspace(0, 2 * math.pi, num=400)
        phivals = np.linspace(0, math.pi / 2, num=100)

        # spatial locations of hydrophones
        z = np.matrix(
            '0 0 0; 0 10 0; 0 20 0; 10 0 0; 10 10 0; 10 20 0; 20 0 0; 20 10 0; 20 20 0'
        )

        N = 9  # No. of hydrophones

        # steering vector (`np.complex` was removed in NumPy 1.24; use the
        # builtin `complex` type instead)
        v = np.zeros(N, dtype=complex)

        # directional unit vector
        a = np.zeros(3)

        scans = []

        winstarts = np.linspace(0, L - window, num=int(L / window), dtype=int)

        # reduced wavelength: speed of sound in water (1481 m/s) divided by the
        # angular frequency, so that phase = (a . z) / c below is in radians
        c = 1481 / (2 * omega * math.pi)

        for t in winstarts:
            # calculate covariance estimate
            R = np.matmul(np.transpose(y[t:t + windowm1]), y[t:t + windowm1])
            R_inv = np.linalg.inv(R)

            maxF = 0
            maxtheta = 0
            maxphi = 0

            for theta in thetavals:
                for phi in phivals:
                    # convert from spherical polar coordinates to cartesian
                    a[0] = math.cos(theta) * math.sin(phi)
                    a[1] = math.sin(theta) * math.sin(phi)
                    a[2] = math.cos(phi)
                    a = a / math.sqrt(np.sum(a * a))
                    for n in range(0, N):
                        phase = np.sum(a * np.transpose(z[n, ])) / c
                        v[n] = math.cos(phase) - math.sin(phase) * 1j
                    # v^H R^-1 v is real (up to rounding) since R is a real
                    # symmetric covariance estimate; take the real part so the
                    # comparison below is valid for Python scalars
                    F = np.real(1 / (
                        (window - N) * np.transpose(np.conj(v)) @ R_inv @ v))
                    if F > maxF:
                        maxF = F
                        maxtheta = theta
                        maxphi = phi

            # Defining a detection
            state_vector = StateVector([maxtheta,
                                        maxphi])  # [Azimuth, Elevation]
            covar = CovarianceMatrix(np.array([[1, 0],
                                               [0, 1]]))  # [[AA, AE],[AE, EE]]
            measurement_model = LinearGaussian(ndim_state=4,
                                               mapping=[0, 2],
                                               noise_covar=covar)
            current_time = current_time + timedelta(milliseconds=window)
            detection = Detection(state_vector,
                                  timestamp=current_time,
                                  measurement_model=measurement_model)
            detections = set([detection])

            scans.append((current_time, detections))

        # For every timestep
        for scan in scans:
            yield scan[0], scan[1]
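For reference, the statistic maximised over the angle grid is (up to the constant
factor window - N) the classic Capon / minimum-variance distortionless response
spectrum:

    P(\theta, \phi) = \frac{1}{\mathbf{v}^{H}(\theta, \phi)\, \hat{R}^{-1}\,
                      \mathbf{v}(\theta, \phi)}

where \mathbf{v} is the array steering vector and \hat{R} the sample covariance of
the hydrophone outputs.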
Code Example #13
import sys
from datetime import datetime

import numpy as np

from stonesoup.models.transition.linear import (
    CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.types.array import CovarianceMatrix


def get_arg(args, flag):
    # Return the value of a command line argument of the form --flag=value
    for arg in args:
        if flag in arg:
            return arg.split(flag)[1]
    raise Exception('Required argument {} was not found'.format(flag))


args = sys.argv
data_file = get_arg(args, '--datafile=')
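# e.g. this script would be invoked as:
#     python <script_name>.py --datafile=detections.csv
# (the script name is hypothetical; only the --datafile= flag appears in this excerpt)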

transition_model = CombinedLinearGaussianTransitionModel(
    [ConstantVelocity(0.01), ConstantVelocity(0.01)])

covar = CovarianceMatrix(np.array([[1, 0], [0, 1]]))  # [[AA, AF],[AF, FF]]
measurement_model = LinearGaussian(ndim_state=4,
                                   mapping=[0, 2],
                                   noise_covar=covar)

from stonesoup.predictor.kalman import KalmanPredictor
predictor = KalmanPredictor(transition_model)

from stonesoup.updater.kalman import KalmanUpdater
updater = KalmanUpdater(measurement_model)

from stonesoup.types.state import GaussianState
prior = GaussianState([[0.5], [0], [0.5], [0]],
                      np.diag([1, 0, 1, 0]),
                      timestamp=datetime.now())
Code Example #14
File: UAV_tutorial.py  Project: d-schwab/Stone-Soup
import numpy as np

from stonesoup.predictor.kalman import ExtendedKalmanPredictor
from stonesoup.updater.kalman import ExtendedKalmanUpdater
from stonesoup.models.transition.linear import (
    CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.models.measurement.nonlinear import (
    CartesianToElevationBearingRange)
from stonesoup.types.array import CovarianceMatrix

transition_model = CombinedLinearGaussianTransitionModel(
    [ConstantVelocity(1.0),
     ConstantVelocity(1.0),
     ConstantVelocity(1.0)])

# Model coords = elev, bearing, range. Angles in radians
meas_covar = np.diag(
    [np.radians(np.sqrt(10.0))**2,
     np.radians(0.6)**2, 3.14**2])

meas_covar_trk = CovarianceMatrix(1.0 * meas_covar)
meas_model = CartesianToElevationBearingRange(ndim_state=6,
                                              mapping=np.array([0, 2, 4]),
                                              noise_covar=meas_covar_trk)
predictor = ExtendedKalmanPredictor(transition_model)
updater = ExtendedKalmanUpdater(measurement_model=meas_model)

# %%
# Setup CSV reader & feeder
# -------------------------
# Setup the reader and feeder to read the GPS data in
# :download:`CSV file <../../demos/UAV_Rot.csv>`.
# This part uses two Stone Soup detector-type classes:
#
# - :class:`~.CSVGroundTruthReader` - reads our CSV file which contains: timestamp,
#   latitude, longitude, altitude and other miscellaneous data.