# NOTE: These snippets rely on the imports below plus repo-local helpers
# (kalman_prediction, kalman_update, Gaussian, GaussianMixture,
# MultiGaussianMixture, optimal_assignment, plotting utilities, ...).
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import logsumexp
from scipy.stats import multivariate_normal


def NN_LG(p_prior, Z, PD, lamb_c, F, Q, H, R):
    """
        Nearest neighbour algorithm for linear and Gaussian models
        with constant probability of detection and uniform clutter.
    """
    # Predict
    x_k_kmin1, P_k_kmin1 = kalman_prediction(p_prior.x, p_prior.P, F, Q)
    
    # Compute w_tilde for all data associations theta
    mk = len(Z)
    w_tilde = np.zeros(mk+1)
    # - Precompute predicted likelihood: z_bar = H x, S = H P H^T + R
    #   (reuses kalman_prediction with (H, R) in place of (F, Q))
    z_bar, S = kalman_prediction(x_k_kmin1, P_k_kmin1, H, R)
    for theta in range(mk + 1):
        if theta == 0:
            w_tilde[theta] = 1 - PD
        else:
            z = Z[theta - 1]
            w_tilde[theta] = (PD / lamb_c) * multivariate_normal.pdf(z, mean=z_bar, cov=S)

    # Find the most probable data association hypothesis
    theta_star = np.argmax(w_tilde)

    # Compute posterior based on theta_star
    x_k_k, P_k_k = x_k_kmin1, P_k_kmin1 # Assume theta_star is 0
    if theta_star > 0:
        z = Z[theta_star - 1]
        x_k_k, P_k_k = kalman_update(x_k_kmin1, P_k_kmin1, H, R, z)

    return Gaussian(x_k_k, P_k_k), Gaussian(x_k_kmin1, P_k_kmin1), theta_star
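
# Usage: a minimal 1-D sketch with illustrative values (mirroring the demo
# scenarios further down; Gaussian and the helpers are this repo's own).
def NN_LG_usage_sketch():
    arr = lambda scalar: np.array([[scalar]])
    p_prior = Gaussian(arr(0.5), arr(0.2))  # Prior
    F, Q = arr(1), arr(0.35)  # Motion model
    H, R = arr(1), arr(0.2)  # Measurement model
    PD, lamb_c = 0.9, 0.4  # Detection probability and clutter intensity
    Z = [arr(-1.3), arr(1.7)]  # Two measurements at time k
    p, p_pred, theta_star = NN_LG(p_prior, Z, PD, lamb_c, F, Q, H, R)
    print(f'Most probable association: theta* = {theta_star}')
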
def GNN_LG(p_prior, Z, n, PD, lamb_c, F, Q, H, R):
    """ 
        Global nearest neighbour algorithm for linear and gaussian models 
        with constant probability of detection and uniform clutter.
    """
    # Dimensions
    nk = p_prior.components[0].x.shape[0]
    mk = len(Z)
    # Allocate
    p_pred = []
    x_k_k = np.zeros((nk,n))
    P_k_k = np.zeros((nk,nk,n))
    # Predict
    for p_i in p_prior.components: # for each object's prior
        x_k_kmin1, P_k_kmin1 = kalman_prediction(p_i.x, p_i.P, F, Q)
        p_pred.append(
            Gaussian(x_k_kmin1, P_k_kmin1, p_i.w)
        )
    p_pred = GaussianMixture(p_pred)
    # Create cost matrix: columns 0..mk-1 are measurement associations,
    # columns mk..mk+n-1 are misdetections (one diagonal entry per object)
    # - Allocate
    L_a = np.zeros((n, mk))
    L_ua = np.ones((n, n)) * np.inf
    L = np.hstack((L_a, L_ua))
    # - Compute log weights
    for i, p_i in enumerate(p_pred.components):
        # - Predicted likelihood
        z_h_i = H @ p_i.x
        S_h_i = H @ p_i.P @ H.T + R
        for theta in range(mk+1):
            # - Cost
            if theta == 0:
                L[i,mk+i] = -np.log(1 - PD)
            else:
                j = theta - 1
                z = Z[j]
                L[i,j] = -(np.log(PD/lamb_c) - 0.5 * np.log(np.linalg.det(2*np.pi*S_h_i)) - \
                        0.5 * (z - z_h_i).T @ np.linalg.inv(S_h_i) @ (z - z_h_i))
    # Find optimal assignment
    theta_star = optimal_assignment(L)
    # Compute posterior density
    for i in range(n):
        x_k_kmin1, P_k_kmin1 = p_pred.components[i].x, p_pred.components[i].P
        if theta_star[i] >= mk:
            x_k_k[:,i], P_k_k[:,:,i] = x_k_kmin1, P_k_kmin1
        else:
            z = Z[theta_star[i]]
            x_k_k[:,i], P_k_k[:,:,i] = kalman_update(x_k_kmin1, P_k_kmin1, H, R, z)
    p = GaussianMixture(
        [Gaussian(x_k_k[:,i], P_k_k[:,:,i]) for i in range(n)]
    )
    return p, p_pred, theta_star
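
# optimal_assignment is a repo helper not shown here. A minimal sketch using
# SciPy's Hungarian solver (an assumption, not necessarily the repo's own
# implementation). Recent SciPy versions treat np.inf entries as forbidden
# assignments, provided a finite-cost assignment exists.
def optimal_assignment_sketch(L):
    from scipy.optimize import linear_sum_assignment
    row_ind, col_ind = linear_sum_assignment(L)
    return col_ind  # col_ind[i] is the column assigned to object/row i
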
def exact_posterior_LG(p_prior, Z, PD, lamb_c, F, Q, H, R):
    """ 
        Exact posterior distribution given linear and gaussian models 
        with constant probability of detection and uniform clutter.
    """
    # Dimensions
    nk = p_prior.components[0].x.shape[0]
    mk = len(Z)
    # Allocate
    p_pred = []
    n_h_k = p_prior.n_components * (mk + 1)
    x_k_k = np.zeros((nk, n_h_k))
    P_k_k = np.zeros((nk, nk, n_h_k))
    w_tilde = np.zeros(n_h_k)
    h = -1  # Index for hypothesis at time k

    for p_h in p_prior.components:
        # Predict
        # - Gaussian, p_k|k-1(theta_1:k-1)
        x_k_kmin1, P_k_kmin1 = kalman_prediction(p_h.x, p_h.P, F, Q)
        # - Density, p(xk|Z_1:k-1)
        p_pred.append(Gaussian(x_k_kmin1, P_k_kmin1, p_h.w))
        # Update
        # - Precompute predicted likelihood
        z_bar, S = kalman_prediction(x_k_kmin1, P_k_kmin1, H, R)
        # - Consider all data associations
        for theta in range(mk + 1):
            # Update
            h += 1
            if theta == 0:
                # - Gaussian
                x_k_k[:, h], P_k_k[:, :, h] = x_k_kmin1, P_k_kmin1
                # - Weight
                w_tilde[h] = p_h.w * (1 - PD)
            else:
                z = Z[theta - 1]
                # - Gaussian
                x_k_k[:, h], P_k_k[:, :, h] = kalman_update(
                    x_k_kmin1, P_k_kmin1, H, R, z)
                # - Weight
                w_tilde[h] = (p_h.w * PD / lamb_c) * multivariate_normal.pdf(
                    z, mean=z_bar, cov=S)

    # Normalize weights
    w_k = w_tilde / sum(w_tilde)
    # Predicted and posterior densities
    p_pred = GaussianMixture(p_pred)
    p = GaussianMixture(
        [Gaussian(x_k_k[:, i], P_k_k[:, :, i], w_k[i]) for i in range(n_h_k)])

    return p, p_pred
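
# The exact posterior is intractable over time: each update multiplies the
# hypothesis count by (mk + 1). A quick illustration, assuming the scan sizes
# of the demo measurement sequence Zs used further down:
def exact_posterior_growth_sketch():
    n_h = 1
    for k, m in enumerate([2, 1, 2, 2, 1, 2], start=1):
        n_h *= m + 1  # One misdetection hypothesis plus m associations
        print(f'k={k}: {n_h} hypotheses')  # Reaches 324 after six scans
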
def PDA_LG(p_prior, Z, PD, lamb_c, F, Q, H, R):
    """ 
        Probabilistic data association filtering for linear and gaussian models 
        with constant probability of detection and uniform clutter.
    """
    # Dimensions
    nk = len(p_prior.x)
    mk = len(Z)

    # Predict
    x_k_kmin1, P_k_kmin1 = kalman_prediction(p_prior.x, p_prior.P, F, Q)

    # Update
    x_k_k = np.zeros((nk,mk+1))
    P_k_k = np.zeros((nk,nk,mk+1))
    w_tilde_theta = np.zeros(mk+1)
    # - Precompute predicted likelihood
    z_bar, S = kalman_prediction(x_k_kmin1, P_k_kmin1, H, R)
    for theta in range(mk + 1): 
        if theta == 0:
            # - Gaussian
            x_k_k[:,theta], P_k_k[:,:,theta] = x_k_kmin1, P_k_kmin1
            # - Weight
            w_tilde_theta[theta] = 1 - PD
        else:
            # - Gaussian
            z = Z[theta - 1]
            x_k_k[:,theta], P_k_k[:,:,theta] = kalman_update(x_k_kmin1, P_k_kmin1, H, R, z)
            # - Weight
            w_tilde_theta[theta] = (PD / lamb_c) * multivariate_normal.pdf(z, z_bar, S)
            
    # - Normalize weights
    w = w_tilde_theta / sum(w_tilde_theta)

    # Reduce to a single Gaussian density via moment matching
    # - Mean
    x_PDA = np.zeros((nk,))
    for i in range(mk + 1):
        x_PDA += w[i] * x_k_k[:, i]
    # - Covariance (np.outer is needed: for 1-D arrays, a @ a.T collapses
    #   to a scalar inner product instead of the outer product)
    P_PDA = np.zeros((nk, nk))
    for i in range(mk + 1):
        d = x_PDA - x_k_k[:, i]
        P_PDA += w[i] * P_k_k[:, :, i] + w[i] * np.outer(d, d)
    
    return Gaussian(x_PDA, P_PDA), Gaussian(x_k_kmin1, P_k_kmin1)
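
# The reduction at the end of PDA_LG is standard moment matching. A
# standalone sketch of the same computation (hypothetical helper, not part
# of this repo):
def moment_matching_sketch(w, x, P):
    """w: (N,) normalized weights; x: (nk, N) means; P: (nk, nk, N) covs."""
    x_bar = x @ w  # Weighted mean
    P_bar = np.zeros(P.shape[:2])
    for i in range(len(w)):
        d = x_bar - x[:, i]
        # Within-component covariance plus between-component spread
        P_bar += w[i] * (P[:, :, i] + np.outer(d, d))
    return x_bar, P_bar
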
def HO_MHT_LG(p_prior, Z, n, PD, lamb_c, F, Q, H, R, Nmax, M, tol_prune):
    """
        Hypothesis-oriented MHT for linear and Gaussian models
        with constant probability of detection and uniform clutter.
    """
    # Dimensions
    mk = len(Z)
    # Predict
    p_pred = []
    for p_h in p_prior.mixtures:
        p_pred_h = []
        for p_h_i in p_h.components:
            x_k_kmin1, P_k_kmin1 = kalman_prediction(p_h_i.x, p_h_i.P, F, Q)
            p_pred_h.append(Gaussian(x_k_kmin1, P_k_kmin1))
        p_pred.append(GaussianMixture(p_pred_h, p_h.w))
    p_pred = MultiGaussianMixture(p_pred, p_prior.ws)

    # Update
    hypotheses = []  # Component lists, one per new global hypothesis
    l_tilde = []  # Matching unnormalized log-weights
    for p_h_kmin1 in p_pred.mixtures:
        L = create_cost_matrix(p_h_kmin1, Z, n, PD, lamb_c, H, R)
        # M best assignments for this parent hypothesis
        theta_stars, costs = compute_M_associations(L, M)
        for m in range(theta_stars.shape[0]):
            p_h_k = []
            l_h = np.log(p_h_kmin1.w)  # Parent weight assumed linear, not log
            for i in range(n):
                p_i = p_h_kmin1.components[i]
                if theta_stars[m, i] >= mk:  # Misdetection columns start at mk
                    p_h_k.append(Gaussian(p_i.x, p_i.P))
                else:  # Associated with measurement theta_stars[m, i]
                    z = Z[theta_stars[m, i]]
                    x_k_k, P_k_k = kalman_update(p_i.x, p_i.P, H, R, z)
                    p_h_k.append(Gaussian(x_k_k, P_k_k))
                # L holds negative log-likelihoods, so subtract the cost
                l_h -= L[i, theta_stars[m, i]]
            hypotheses.append(p_h_k)
            l_tilde.append(l_h)

    # Normalize log-weights (log-sum-exp for numerical stability)
    l_tilde = np.asarray(l_tilde)
    ws = np.exp(l_tilde - logsumexp(l_tilde))
    # (Pruning with tol_prune and capping at Nmax hypotheses would go here.)
    p = MultiGaussianMixture(
        [GaussianMixture(p_h_k, w) for p_h_k, w in zip(hypotheses, ws)], ws)
    return p, p_pred
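
# create_cost_matrix and compute_M_associations are repo helpers not shown
# here; compute_M_associations presumably returns the M best assignments
# (e.g. via Murty's algorithm). A sketch of the cost matrix under the
# assumption that it mirrors the construction inside GNN_LG above:
def create_cost_matrix_sketch(p_pred, Z, n, PD, lamb_c, H, R):
    mk = len(Z)
    L = np.hstack((np.zeros((n, mk)), np.full((n, n), np.inf)))
    for i, p_i in enumerate(p_pred.components):
        z_hat = H @ p_i.x
        S = H @ p_i.P @ H.T + R
        L[i, mk + i] = -np.log(1 - PD)  # Misdetection cost on the diagonal
        for j, z in enumerate(Z):
            L[i, j] = -(np.log(PD / lamb_c)
                        - 0.5 * np.log(np.linalg.det(2 * np.pi * S))
                        - 0.5 * (z - z_hat).T @ np.linalg.inv(S) @ (z - z_hat))
    return L
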
def GSF_linear_gaussian_models_simple_scenario():
    arr = lambda scalar: np.array([[scalar]])
    # Prior
    x_prior = arr(0.5)
    P_prior = arr(0.2)
    p_prior_exact = GaussianMixture([Gaussian(x_prior, P_prior, weight=1)])
    p_prior_GSF = GaussianMixture([Gaussian(x_prior, P_prior, weight=1)])

    # Models
    # - Motion
    Q = arr(0.35)
    F = arr(1)
    # - Measurement
    R = arr(0.2)
    H = arr(1)
    PD = 0.9
    # - Clutter
    lamb = 0.4

    # GSF tuning params
    Nmax = 5
    tol_prune = 0.01  # or None

    # Sensor
    space = Space1D(-4, 4)

    # Create measurement vector
    Z1 = [-1.3, 1.7]
    Z2 = [1.3]
    Z3 = [-0.3, 2.3]
    Z4 = [-2, 3]
    Z5 = [2.6]
    Z6 = [-3.5, 2.8]
    Zs = [Z1, Z2, Z3, Z4, Z5, Z6]

    # Plot settings
    ax = plt.subplot()
    res = 100
    marker_size = 100
    x_lim = [space.min, space.max]

    # Run the exact posterior and GSF recursions over the measurement sequence
    for k, Z in enumerate(Zs, start=1):
        print(f'Measurements: {Z}')
        # Compute posterior
        p_exact, p_pred_exact = exact_posterior_LG(p_prior_exact, Z, PD, lamb,
                                                   F, Q, H, R)
        p_GSF, p_pred_GSF = GSF_LG(p_prior_GSF, Z, PD, lamb, F, Q, H, R, Nmax,
                                   tol_prune)

        # Number of hypotheses
        print(f'Number of hypotheses, exact posterior: {p_exact.n_components}')
        print(f'Number of hypotheses, GSF posterior: {p_GSF.n_components}')

        # Plot
        ax.clear()
        ax.set(title=f'k = {k}', xlabel='x')
        ax.axhline(0, color='gray', linewidth=0.5)
        # - Predicted density according to GSF
        plt1 = plot_gaussian_mixture_pdf(ax,
                                         p_pred_GSF,
                                         x_lim,
                                         res=res,
                                         color='r',
                                         linestyle='--',
                                         zorder=0)
        # - Exact posterior density
        plt2 = plot_gaussian_mixture_pdf(ax,
                                         p_exact,
                                         x_lim,
                                         res=res,
                                         color='k',
                                         zorder=1)
        # - Posterior density according to GSF
        plt3 = plot_gaussian_mixture_pdf(ax,
                                         p_GSF,
                                         x_lim,
                                         res=res,
                                         color='orange',
                                         linestyle='--',
                                         marker='x',
                                         zorder=2)
        # - Measurements
        plot_1D_measurements(ax,
                             Z,
                             color='b',
                             marker='*',
                             s=marker_size,
                             zorder=3)
        # - Final details
        ax.set_xlim([space.min, space.max])
        ax.set_ylim([-0.05, 1.05])
        ax.legend((plt1, plt2, plt3), ('pred', 'exact', 'GSF'))
        plt.pause(0.0001)

        p_prior_exact = p_exact
        p_prior_GSF = p_GSF

        input('Press Enter to continue')
def GNN_LG_models_2_obj_1D():
    arr = lambda scalar: np.array([[scalar]])

    # Prior
    x_prior = arr(2.5)
    P_prior = arr(0.36)
    p_prior = GaussianMixture(
        [Gaussian(-x_prior, P_prior),
         Gaussian(x_prior, P_prior)])
    n = 2
    # g_mix = GaussianMixture([Gaussian(-3, 0.2), Gaussian(3, 0.2)])
    # p_prior = MultiGaussianMixture(g_mix)

    # Models
    # - Motion
    Q = arr(0.25)
    F = arr(1)
    # - Measurement
    R = arr(0.2)
    H = arr(1)
    PD = 0.85
    # - Clutter
    lamb = 0.3

    # Measurements
    Z = [-3.2, -2.4, 1.9, 2.2, 2.7, 2.9]
    Z = [arr(val) for val in Z]

    p, p_pred, theta_star = GNN_LG(p_prior, Z, n, PD, lamb, F, Q, H, R)

    # Plot
    ax = plt.subplot()
    res = 100
    marker_size = 100
    x_lim = [-4, 4]
    colors = ['orange', 'purple']
    ax.axhline(0, color='gray', linewidth=0.5)
    # - Measurements
    plot_1D_measurements(ax, Z, color='b', marker='*', s=marker_size, zorder=3)
    # - Object densities
    plts = []
    for i in range(n):
        # - Prior density
        plt1 = plot_gaussian_pdf(ax,
                                 p_prior.components[i],
                                 x_lim,
                                 res=res,
                                 color='g',
                                 zorder=1)
        # - Predicted density
        plt2 = plot_gaussian_pdf(ax,
                                 p_pred.components[i],
                                 x_lim,
                                 res=res,
                                 color='r',
                                 linestyle='--',
                                 zorder=0)
        # - Posterior density
        plt3 = plot_gaussian_pdf(ax,
                                 p.components[i],
                                 x_lim,
                                 res=res,
                                 color=colors[i],
                                 linestyle='--',
                                 zorder=2)
        # - Association
        if theta_star[i] >= len(Z):
            ax.scatter(p_pred.components[i].x,
                       0,
                       color=colors[i],
                       marker='o',
                       s=marker_size,
                       zorder=3)
        else:
            ax.scatter(Z[theta_star[i]],
                       0,
                       color=colors[i],
                       marker='*',
                       s=marker_size,
                       zorder=3)
        plts.extend([plt1, plt2, plt3])
    # - Final details
    # ax.set_xlim(x_lim)
    ax.set_ylim([-0.05, 1.5])
    ax.legend(
        plts,
        ('o1_prior', 'o1_pred', 'o1_GNN', 'o2_prior', 'o2_pred', 'o2_GNN'))
    plt.pause(0.0001)
def GNN_LG_models_2_obj_sequence_1D():
    arr = lambda scalar: np.array([[scalar]])

    # Space
    space = Space1D(-4, 4)

    # Prior
    x_prior = arr(2.5)
    P_prior = arr(0.36)
    p_prior = GaussianMixture(
        [Gaussian(-x_prior, P_prior),
         Gaussian(x_prior, P_prior)])

    # Models
    # - Motion
    Q = arr(0.25)
    F = arr(1)
    # - Measurement
    R = arr(0.2)
    H = arr(1)
    PD = 0.85
    _PD = lambda x: PD
    # - Clutter
    lamb = 0.4
    clutter_model = UniformClutterModel1D(space, lamb)

    # Create true sequence
    n = 2
    n_ks = 50
    X = np.vstack((-x_prior * np.ones(n_ks), x_prior * np.ones(n_ks)))

    # Measurements
    sensor = Detector1D(R, _PD, clutter_model, space)
    Z = sensor.get_measurements(X)

    # Estimate trajectories
    x_hat = np.zeros((n, n_ks))
    P_hat = np.zeros((n, n_ks))
    thetas = np.zeros((n, n_ks))
    for k in range(n_ks):
        print(f'---k={k+1}---')
        print(Z[k])
        p, p_pred, theta_star = GNN_LG(p_prior, Z[k], n, PD, lamb, F, Q, H, R)
        for i in range(n):
            x_hat[i, k] = p.components[i].x
            P_hat[i, k] = p.components[i].P
            thetas[i, k] = theta_star[i]
        p_prior = p

    # Plot
    ax = plt.subplot()
    marker_size = 100
    colors = ['orange', 'purple']
    ass_colors = ['g', 'g']
    y_axis = np.linspace(1, n_ks, num=n_ks)
    # - Heatmaps
    for k in range(n_ks):
        for i in range(n):
            plot_1D_heatmap(ax, x_hat[i, k], P_hat[i, k], k + 1)
    # - True trajectories
    for i in range(n):
        ax.plot(X[i, :], y_axis, color='k', marker='o')
    # - Measurements
    for k in range(n_ks):
        ax.axhline(k + 1.5, color='gray', linewidth=0.5)
        plot_1D_measurements(ax,
                             Z[k],
                             k=k + 1,
                             color='b',
                             marker='*',
                             s=marker_size)
    # - Associations
    for k in range(n_ks):
        for i in range(n):
            if thetas[i, k] < len(Z[k]):
                ax.scatter(Z[k][int(thetas[i, k])],
                           k + 1,
                           color=ass_colors[i],
                           marker='*',
                           s=marker_size)
    # - Estimates
    for i in range(n):
        ax.plot(x_hat[i, :], y_axis, color=colors[i], marker='s')
    # - Final details
    ax.set(xlabel='x', ylabel='k')
    ax.set_xlim([space.min, space.max])

    plt.pause(0.0001)
def PDA_linear_gaussian_models_simple_scenario():
    arr = lambda scalar: np.array([[scalar]])
    # Prior
    x_prior = arr(0.5)
    P_prior = arr(0.2)
    p_prior_exact = GaussianMixture([Gaussian(x_prior, P_prior, weight=1)])
    p_prior_PDA = Gaussian(x_prior, P_prior)

    # Models
    # - Motion
    Q = arr(0.35)
    F = arr(1)
    # - Measurement
    R = arr(0.2)
    H = arr(1)
    PD = 0.9
    # - Clutter
    lamb = 0.4

    # Sensor
    space = Space1D(-4, 4)

    # Create measurement vector
    Z1 = [-1.3, 1.7]
    Z2 = [1.3]
    Z3 = [-0.3, 2.3]
    Z4 = [-2, 3]
    Z5 = [2.6]
    Z6 = [-3.5, 2.8]
    Zs = [Z1, Z2, Z3, Z4, Z5, Z6]

    # Plot settings
    ax = plt.subplot()
    res = 100
    x_lim = [space.min, space.max]

    # Run the exact posterior and PDA recursions over the measurement sequence
    for k, Z in enumerate(Zs, start=1):
        print(f'Measurements: {Z}')
        # Compute posterior
        p_exact, p_pred_exact = exact_posterior_LG(p_prior_exact, Z, PD, lamb,
                                                   F, Q, H, R)
        p_PDA, p_pred_PDA = PDA_LG(p_prior_PDA, Z, PD, lamb, F, Q, H, R)

        # Number of hypotheses
        print(f'Number of hypotheses: {p_exact.n_components}')

        # Plot
        ax.clear()
        ax.set(title=f'k = {k}', xlabel='x')
        ax.axhline(0, color='gray', linewidth=0.5)
        # - Predicted density according to PDA
        plt1 = plot_gaussian_pdf(ax,
                                 p_pred_PDA,
                                 x_lim,
                                 res=res,
                                 color='r',
                                 linestyle='--',
                                 zorder=0)
        # - Exact posterior density
        plt2 = plot_gaussian_mixture_pdf(ax,
                                         p_exact,
                                         x_lim,
                                         res=res,
                                         color='k',
                                         zorder=1)
        # - Posterior density according to PDA
        plt3 = plot_gaussian_pdf(ax,
                                 p_PDA,
                                 x_lim,
                                 res=res,
                                 color='g',
                                 marker='s',
                                 zorder=2)

        # - Hypotheses from PDA
        marker_size = 100
        ax.scatter(p_pred_PDA.x,
                   0,
                   color='b',
                   marker='o',
                   s=marker_size,
                   zorder=3)
        y_zero = np.zeros(len(Z))
        ax.scatter(Z, y_zero, color='b', marker='*', s=marker_size, zorder=3)

        ax.set_xlim(x_lim)
        ax.set_ylim([-0.05, 1.2])
        ax.legend((plt1, plt2, plt3), ('pred', 'exact', 'PDA'))

        p_prior_exact = p_exact
        p_prior_PDA = p_PDA

        plt.pause(0.0001)
        input('Press Enter to continue')
def GSF_LG(p_prior,
           Z,
           PD,
           lamb_c,
           F,
           Q,
           H,
           R,
           Nmax,
           tol_prune=None,
           tol_merge=None):
    """ 
        Gaussian sum filtering for linear and gaussian models 
        with constant probability of detection and uniform clutter.
    """
    # Dimensions
    nk = p_prior.components[0].x.shape[0]
    mk = len(Z)
    # Allocate
    p_pred = []
    n_h_k = p_prior.n_components * (mk + 1)
    x_k_k = np.zeros((nk, n_h_k))
    P_k_k = np.zeros((nk, nk, n_h_k))
    w_tilde = np.zeros(n_h_k)
    h = -1  # Index for hypothesis at time k
    for p_h in p_prior.components:
        # Predict
        x_k_kmin1, P_k_kmin1 = kalman_prediction(p_h.x, p_h.P, F, Q)
        p_pred.append(Gaussian(x_k_kmin1, P_k_kmin1, p_h.w))
        # Update
        # - Precompute predicted likelihood
        z_bar, S = kalman_prediction(x_k_kmin1, P_k_kmin1, H, R)
        # - Consider all data associations
        for theta in range(mk + 1):
            h += 1
            if theta == 0:
                # - Gaussian
                x_k_k[:, h], P_k_k[:, :, h] = x_k_kmin1, P_k_kmin1
                # - Weight
                w_tilde[h] = p_h.w * (1 - PD)
            else:
                z = Z[theta - 1]
                # - Gaussian
                x_k_k[:, h], P_k_k[:, :, h] = kalman_update(
                    x_k_kmin1, P_k_kmin1, H, R, z)
                # - Weight
                w_tilde[h] = (p_h.w * PD / lamb_c) * multivariate_normal.pdf(
                    z, z_bar, S)

    # Normalize weights
    w_k = w_tilde / sum(w_tilde)

    # Reduction
    # - Pruning: drop components with weight below tol_prune
    w_k, x_k_k, P_k_k = prune(w_k, x_k_k, P_k_k, tol_prune)
    # - Merging: combine components that are sufficiently close (tol_merge)
    w_k, x_k_k, P_k_k = merge(w_k, x_k_k, P_k_k, tol_merge)
    # - Capping: keep at most Nmax components
    w_k, x_k_k, P_k_k = capp(w_k, x_k_k, P_k_k, Nmax)

    # Create final mixtures
    H_k = x_k_k.shape[1]
    p_pred = GaussianMixture(p_pred)
    p = GaussianMixture(
        [Gaussian(x_k_k[:, i], P_k_k[:, :, i], w_k[i]) for i in range(H_k)])
    return p, p_pred
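
# prune, merge and capp are repo helpers not shown here. Minimal sketches of
# pruning and capping under assumed conventions (x: (nk, H) means,
# P: (nk, nk, H) covariances; weights renormalized after each step):
def prune_sketch(w, x, P, tol):
    if tol is None:
        return w, x, P
    keep = w > tol  # Drop low-weight components
    w = w[keep] / np.sum(w[keep])
    return w, x[:, keep], P[:, :, keep]


def capp_sketch(w, x, P, Nmax):
    idx = np.argsort(w)[::-1][:Nmax]  # Keep the Nmax heaviest components
    w = w[idx] / np.sum(w[idx])
    return w, x[:, idx], P[:, :, idx]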