Example #1
import numpy as np

# `MotionModel` and `MeasModel` are the project's model interfaces (not defined in this
# snippet); the code only relies on their `map_set`, `jacobian`, `proc_noise` and
# `meas_noise` methods.


def dir_der_analytical_smoothing_cost(x_0, p, measurements, m_1_0, P_1_0,
                                      motion_model: MotionModel,
                                      meas_model: MeasModel):
    """Directional derivative of the cost function f in `analytical_smoothing_cost`

    Here, the full trajectory x_1:K is interpreted as one vector (x_1^T, ..., x_K)^T with K d_x elements.

    Args:
        x_0: current iterate
            represented as a np.array(K, D_x).
        p: search direction, here: x_1 - x_0, i.e. new smoothing estimated means minus current iterate.
            represented as a np.array(K, D_x).
        measurements: measurements for a time sequence 1, ..., K
            represented as a list of length K of np.array(D_y,)
    """
    K = len(measurements)

    prior_diff = x_0[0, :] - m_1_0
    motion_diff = x_0[1:, :] - motion_model.map_set(x_0[:-1, :], None)
    meas_diff = measurements - meas_model.map_set(x_0, None)

    # Prior term: p_1^T P_{1|0}^{-1} (x_1 - m_{1|0})
    der = p[0, :] @ np.linalg.inv(P_1_0) @ prior_diff
    # Measurement term for k = 1, skipped if the measurement is missing (NaN), as in the loop below
    if not any(np.isnan(meas_diff[0, :])):
        H_1 = meas_model.jacobian(x_0[0, :], 1)
        R_1_inv = np.linalg.inv(meas_model.meas_noise(1))
        der -= p[0, :] @ H_1.T @ R_1_inv @ meas_diff[0, :]

    for k_ind in range(1, K):
        k = k_ind + 1
        # Motion term for k = 2, ..., K: (p_k - F_{k-1} p_{k-1})^T Q_{k-1}^{-1} (x_k - f(x_{k-1}))
        F_k_min_1 = motion_model.jacobian(x_0[k_ind - 1, :], k - 1)
        factor_1 = p[k_ind, :] - F_k_min_1 @ p[k_ind - 1, :]
        der += factor_1 @ np.linalg.inv(
            motion_model.proc_noise(k - 1)) @ motion_diff[k_ind - 1, :]
        # Measurement term for k = 2, ..., K, skipping missing (NaN) measurements
        if any(np.isnan(meas_diff[k_ind, :])):
            continue
        H_k = meas_model.jacobian(x_0[k_ind, :], k)
        R_k_inv = np.linalg.inv(meas_model.meas_noise(k))
        der -= p[k_ind, :] @ H_k.T @ R_k_inv @ meas_diff[k_ind, :]

    return der
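
The function above only requires the model objects to expose `map_set`, `jacobian` and `proc_noise`/`meas_noise` (the `MotionModel`/`MeasModel` annotations assume the project's classes are in scope). The following is a minimal sketch of a sanity check, not part of the original project: `LinearMotion`, `LinearMeas` and `reference_cost` are hypothetical stand-ins, with the reference cost written in the 1/2-quadratic convention implied by the derivative above, and the analytical directional derivative is compared against a central finite difference.

import numpy as np


class LinearMotion:
    """Hypothetical linear motion model: x_k = A x_{k-1} + q_k, q_k ~ N(0, Q)."""

    def __init__(self, A, Q):
        self.A, self.Q = A, Q

    def map_set(self, states, _time_steps):
        return states @ self.A.T

    def jacobian(self, _state, _k):
        return self.A

    def proc_noise(self, _k):
        return self.Q


class LinearMeas:
    """Hypothetical linear measurement model: y_k = H x_k + r_k, r_k ~ N(0, R)."""

    def __init__(self, H, R):
        self.H, self.R = H, R

    def map_set(self, states, _time_steps):
        return states @ self.H.T

    def jacobian(self, _state, _k):
        return self.H

    def meas_noise(self, _k):
        return self.R


def reference_cost(x, measurements, m_1_0, P_1_0, motion_model, meas_model):
    """Quadratic MAP cost implied by the derivative above (1/2-convention):
    prior term + motion terms for k = 2..K + measurement terms for k = 1..K."""
    K = len(measurements)
    prior_diff = x[0, :] - m_1_0
    cost = 0.5 * prior_diff @ np.linalg.inv(P_1_0) @ prior_diff
    motion_diff = x[1:, :] - motion_model.map_set(x[:-1, :], None)
    meas_diff = measurements - meas_model.map_set(x, None)
    for k_ind in range(1, K):
        Q_inv = np.linalg.inv(motion_model.proc_noise(k_ind))
        cost += 0.5 * motion_diff[k_ind - 1, :] @ Q_inv @ motion_diff[k_ind - 1, :]
    for k_ind in range(K):
        if any(np.isnan(meas_diff[k_ind, :])):
            continue
        R_inv = np.linalg.inv(meas_model.meas_noise(k_ind + 1))
        cost += 0.5 * meas_diff[k_ind, :] @ R_inv @ meas_diff[k_ind, :]
    return cost


rng = np.random.default_rng(0)
K, D_x, D_y = 5, 2, 1
motion = LinearMotion(np.array([[1.0, 1.0], [0.0, 1.0]]), 0.1 * np.eye(D_x))
meas = LinearMeas(np.array([[1.0, 0.0]]), 0.5 * np.eye(D_y))
m_1_0, P_1_0 = np.zeros(D_x), np.eye(D_x)
measurements = rng.normal(size=(K, D_y))
x_0 = rng.normal(size=(K, D_x))
p = rng.normal(size=(K, D_x))

der = dir_der_analytical_smoothing_cost(
    x_0, p, measurements, m_1_0, P_1_0, motion, meas)
eps = 1e-6
fd = (reference_cost(x_0 + eps * p, measurements, m_1_0, P_1_0, motion, meas)
      - reference_cost(x_0 - eps * p, measurements, m_1_0, P_1_0, motion, meas)) / (2 * eps)
print(der, fd)  # should agree closely: the cost is exactly quadratic for these linear stubs
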
Example #2
import numpy as np

# As in Example #1, `MotionModel` and `MeasModel` are the project's model interfaces
# (not defined in this snippet).


def grad_analytical_smoothing_cost(x_0, measurements, m_1_0, P_1_0,
                                   motion_model: MotionModel,
                                   meas_model: MeasModel):
    """Gradient of the cost function f in `analytical_smoothing_cost`
    Here, the full trajectory x_1:K is interpreted as one vector (x_1^T, ..., x_K)^T with K d_x elements.

    Args:
        x_0: current iterate
            represented as a np.array(K, D_x).
        measurements: measurements for a time sequence 1, ..., K
            represented as a list of length K of np.array(D_y,)
    """
    K = len(measurements)
    D_x = m_1_0.shape[0]
    # grad_pred collects the terms where x_k enters directly (the prior term and
    # x_k - f(x_{k-1})); block k is stored at [k_ind * D_x:(k_ind + 1) * D_x].
    grad_pred = np.zeros((K * D_x, ))
    # grad_update collects the cross terms where x_k enters through f(x_k)
    # in the next motion difference x_{k+1} - f(x_k).
    grad_update = np.zeros(grad_pred.shape)

    prior_diff = x_0[0, :] - m_1_0
    motion_diff = x_0[1:, :] - motion_model.map_set(x_0[:-1, :], None)

    # Block k = 1: prior term and the cross term from x_2 - f(x_1)
    grad_pred[0:D_x] = np.linalg.inv(P_1_0) @ prior_diff
    F_1 = motion_model.jacobian(x_0[0, :], 1)
    grad_update[0:D_x] = F_1.T @ np.linalg.inv(
        motion_model.proc_noise(1)) @ motion_diff[0, :]

    for k_ind in range(1, K - 1):
        k = k_ind + 1
        # Blocks k = 2, ..., K-1: direct motion term and cross term to the next step
        grad_pred[k_ind * D_x:(k_ind + 1) * D_x] = (np.linalg.inv(
            motion_model.proc_noise(k - 1)) @ motion_diff[k_ind - 1, :])
        F_k = motion_model.jacobian(x_0[k_ind, :], k)
        grad_update[k_ind * D_x:(k_ind + 1) * D_x] = (F_k.T @ np.linalg.inv(
            motion_model.proc_noise(k)) @ motion_diff[k_ind, :])

    # Block k = K: only the direct motion term x_K - f(x_{K-1}); there is no
    # cross term, so the last block of grad_update stays zero.
    grad_pred[(K - 1) * D_x:K * D_x] = np.linalg.inv(
        motion_model.proc_noise(K - 1)) @ motion_diff[-1, :]

    # Measurement terms: H_k^T R_k^{-1} (y_k - h(x_k)), skipping missing (NaN) measurements
    meas_diff = measurements - meas_model.map_set(x_0, None)
    grad_meas = np.zeros(grad_pred.shape)
    for k_ind in range(0, K):
        k = k_ind + 1
        if any(np.isnan(meas_diff[k_ind, :])):
            continue
        H_k = meas_model.jacobian(x_0[k_ind, :], k)
        R_k_inv = np.linalg.inv(meas_model.meas_noise(k))
        grad_meas[k_ind * D_x:(k_ind + 1) *
                  D_x] = H_k.T @ R_k_inv @ meas_diff[k_ind, :]

    # Full gradient: direct terms minus cross terms minus measurement terms
    return grad_pred - grad_update - grad_meas
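
As a quick consistency check between the two examples, the gradient stacked block-wise as (df/dx_1, ..., df/dx_K), contracted with the row-major flattening of a direction p, should reproduce the directional derivative from Example #1. This sketch continues the hypothetical setup from the check after Example #1 (the LinearMotion/LinearMeas stubs and the random x_0, p, measurements defined there).

# Continues the setup from the sketch after Example #1 (hypothetical
# LinearMotion / LinearMeas stubs and the random x_0, p, measurements).
grad = grad_analytical_smoothing_cost(
    x_0, measurements, m_1_0, P_1_0, motion, meas)

# The gradient is stacked block-wise as (df/dx_1, ..., df/dx_K), so contracting it
# with the row-major flattening of a direction p gives the directional derivative.
der_from_grad = grad @ p.reshape(-1)
der = dir_der_analytical_smoothing_cost(
    x_0, p, measurements, m_1_0, P_1_0, motion, meas)
print(der_from_grad, der)  # the two values should match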