Example #1
    def update(self, z, R=None, H=None):

        # set to None to force recompute
        self._log_likelihood = None
        self._likelihood = None
        self._mahalanobis = None

        if np.all(np.isnan(z)):
            # measurement dropped out: synthesize a pseudo-measurement by
            # backing out the last velocity step and re-applying the mean of
            # the five most recent velocity samples
            x_mean_velocity = np.mean(self.velocity_x[-5:])
            y_mean_velocity = np.mean(self.velocity_y[-5:])

            # scale=0 yields a zero vector; increase scale to jitter the
            # pseudo-measurement
            z_noise = np.random.normal(loc=0, scale=0, size=2)
            z = list(
                self.x[0:2] - [self.velocity_x[-1], self.velocity_y[-1]]
                + [x_mean_velocity, y_mean_velocity] + z_noise)
            self.z = z

        z = reshape_z(z, self.dim_z, self.x.ndim)
        self.zs.append(z)

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        if H is None:
            H = self.H

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, self.x)

        # common subexpression for speed
        PHT = dot(self.P, H.T)

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot(H, PHT) + R
        self.SI = self.inv(self.S)
        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(PHT, self.SI)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)

        # P = (I-KH)P
        # note: this variant uses the short form. The Joseph form
        # P = (I-KH)P(I-KH)' + KRK' (commented out below) is more numerically
        # stable and also works for non-optimal K.

        I_KH = self._I - dot(self.K, H)
        # self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)
        self.P = dot(I_KH, self.P)

        # save measurement and posterior state
        self.z = deepcopy(z)
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
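A note on the all-NaN branch in Example #1: when the detector drops out, it backs out the last velocity step and re-applies the mean of the five most recent velocity samples to build a pseudo-measurement. A minimal numpy sketch with made-up values (pos, velocity_x, velocity_y are illustrative, not the class attributes):

import numpy as np

pos = np.array([10.0, 4.0])               # current position estimate, x[0:2]
velocity_x = [1.0, 1.1, 0.9, 1.0, 1.2]    # recent per-step velocities
velocity_y = [0.5, 0.4, 0.6, 0.5, 0.5]

v_last = np.array([velocity_x[-1], velocity_y[-1]])
v_mean = np.array([np.mean(velocity_x[-5:]), np.mean(velocity_y[-5:])])

# back out the last step, then re-apply the smoothed step
z = pos - v_last + v_mean
print(z)   # [9.84 4.  ]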
Example #2
    def update(self, z, R_inv=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R_inv : np.array, scalar, or None
            Optionally provide the inverse measurement noise R_inv to override
            self.R_inv for this one call.
        """

        if z is None:
            return

        if R_inv is None:
            R_inv = self.R_inv
        elif np.isscalar(R_inv):
            R_inv = eye(self.dim_z) * R_inv

        # rename for readability and a tiny extra bit of speed
        H = self.H
        H_T = H.T
        P_inv = self.P_inv
        x = self.x

        if self._no_information:
            self.x = dot(P_inv, x) + dot(H_T, R_inv).dot(z)
            self.P_inv = P_inv + dot(H_T, R_inv).dot(H)
            self.log_likelihood = math.log(sys.float_info.min)
            self.likelihood = sys.float_info.min

        else:
            # y = z - Hx
            # error (residual) between measurement and prediction
            self.y = z - dot(H, x)

            # S = P_inv + H'R_inv H
            # here S is the updated information (inverse-covariance) matrix,
            # not the innovation covariance HPH' + R of the standard filter
            self.S = P_inv + dot(H_T, R_inv).dot(H)
            self.K = dot(self.inv(self.S), H_T).dot(R_inv)

            # x = x + Ky
            # predict new x with residual scaled by the kalman gain
            self.x = x + dot(self.K, self.y)
            self.P_inv = P_inv + dot(H_T, R_inv).dot(H)

            self.z = np.copy(reshape_z(z, self.dim_z, np.ndim(self.x)))

            if self.compute_log_likelihood:
                self.log_likelihood = logpdf(x=self.y, cov=self.S)
                self.likelihood = math.exp(self.log_likelihood)
                if self.likelihood == 0:
                    self.likelihood = sys.float_info.min
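Example #2 runs the update in information (inverse-covariance) form. A minimal scalar sketch, numpy only, checking that the information update P_inv + H'R_inv H agrees with the covariance-form posterior:

import numpy as np

P, R, H = 4.0, 1.0, 1.0

# covariance form: K = PH'(HPH'+R)^-1, P_post = (1-KH)P
K = P * H / (H * P * H + R)
P_post_cov = (1 - K * H) * P

# information form: P_inv_post = P_inv + H' R_inv H
P_inv_post = 1.0 / P + H * (1.0 / R) * H

print(np.isclose(1.0 / P_inv_post, P_post_cov))   # True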
Example #3
    def update(self, z, R=None, H=None):
        """与原update函数的唯一不同在于P更新的方式可以调节,为了保证其它功能正常,仍然保留原始的内容
        """

        # set to None to force recompute
        self._log_likelihood = None
        self._likelihood = None
        self._mahalanobis = None

        if z is None:
            self.z = np.array([[None] * self.dim_z]).T
            self.x_post = self.x.copy()
            self.P_post = self.P.copy()
            self.y = np.zeros((self.dim_z, 1))
            return

        z = reshape_z(z, self.dim_z, self.x.ndim)

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        if H is None:
            H = self.H

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, self.x)

        # common subexpression for speed
        PHT = dot(self.P, H.T)

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot(H, PHT) + R
        self.SI = self.inv(self.S)
        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(PHT, self.SI)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK'
        # This is more numerically stable
        # and works for non-optimal K vs the equation
        # P = (I-KH)P usually seen in the literature.

        I_KH = self._I - dot(self.K, H)
        # self.P = dot(I_KH, self.P)   # the usual handling
        self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)

        # save measurement and posterior state
        self.z = deepcopy(z)
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
Example #4
def update_steadystate(x, z, K, H=None):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.


    Parameters
    ----------

    x : numpy.array(dim_x, 1), or float
        State estimate vector


    z : (dim_z, 1): array_like
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.

    K : numpy.array, or float
        Kalman gain matrix

    H : numpy.array(dim_z, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    Returns
    -------

    x : numpy.array
        Posterior state estimate vector

    Examples
    --------

    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    >>> update_steadystate(1, 2, 1)  # univariate
    >>> update_steadystate(x, z, K, H)
    """


    if z is None:
        return x

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    Hx = np.atleast_1d(dot(H, x))
    z = reshape_z(z, Hx.shape[0], x.ndim)

    # error (residual) between measurement and prediction
    y = z - Hx

    # estimate new x with residual scaled by the kalman gain
    return x + dot(K, y)
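A hypothetical call of update_steadystate() above with a hand-picked gain; the K value here is made up for illustration and would normally come from a converged filter:

import numpy as np

x = np.array([[0.], [1.]])      # position, velocity
H = np.array([[1., 0.]])        # measure position only
K = np.array([[0.5], [0.1]])    # assumed steady-state gain (illustrative)

x = update_steadystate(x, z=2.0, K=K, H=H)
print(x)   # [[1. ] [1.2]] -- position pulled halfway toward the measurement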
Example #5
    def __init__(self, dim_x, dim_z, dim_u=0):

        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        # Set templates that will hook the assignments and cast to
        # the specific shape
        self.x = properties.MatrixTemplate(dim_x, 1)
        self.P = properties.MatrixTemplate(dim_x, dim_x)
        self.Q = properties.MatrixTemplate(dim_x, dim_x)
        self.B = properties.MatrixTemplate(dim_x, dim_u)
        self.F = properties.MatrixTemplate(dim_x, dim_x)
        self.R = properties.MatrixTemplate(dim_z, dim_z)
        self.z = properties.MatrixTemplate(dim_z, 1)


        self.x = zeros((dim_x, 1)) # state
        self.P = eye(dim_x)        # uncertainty covariance
        self.B = 0                 # control transition matrix
        self.F = np.eye(dim_x)     # state transition matrix
        self.R = eye(dim_z)        # state uncertainty
        self.Q = eye(dim_x)        # process uncertainty

        z = np.array([None]*self.dim_z)
        self.z = reshape_z(z, self.dim_z, self.x.ndim)

        # gain and residual are computed during the innovation step. We
        # save them so that in case you want to inspect them for various
        # purposes
        self.K = np.zeros((dim_x, dim_z)) # kalman gain
        self.y = zeros((dim_z, 1))
        self.S = np.zeros((dim_z, dim_z))   # system uncertainty
        self.SI = np.zeros((dim_z, dim_z))  # inverse system uncertainty

        # identity matrix. Do not alter this.
        self._I = np.eye(dim_x)

        self._log_likelihood = log(sys.float_info.min)
        self._likelihood = sys.float_info.min
        self._mahalanobis = None

        # these will always be a copy of x,P after predict() is called
        self.x_prior = self.x.copy()
        self.P_prior = self.P.copy()

        # these will always be a copy of x,P after update() is called
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
Example #6
    def get_update(self, z=None):
        """
        Computes the new estimate based on measurement `z` and returns it
        without altering the state of the filter.

        Parameters
        ----------

        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.

        Returns
        -------

        (x, P) : tuple
            State vector and covariance array of the update.
       """

        if z is None:
            return self.x, self.P
        z = reshape_z(z, self.dim_z, self.x.ndim)

        R = self.R
        H = self.H
        P = self.P
        x = self.x

        # error (residual) between measurement and prediction
        y = z - dot(H, x)

        # common subexpression for speed
        PHT = dot(P, H.T)

        # project system uncertainty into measurement space
        S = dot(H, PHT) + R

        # map system uncertainty into kalman gain
        K = dot(PHT, self.inv(S))

        # predict new x with residual scaled by the kalman gain
        x = x + dot(K, y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self._I - dot(K, H)
        P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)

        return x, P
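A short check of the contract above, assuming a filterpy-style KalmanFilter that exposes get_update(): the peeked posterior matches a committed update(), and the filter state is untouched in between.

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.H = np.array([[1., 0.]])
x_before = kf.x.copy()

x_new, P_new = kf.get_update(1.2)       # peek at the posterior
assert np.array_equal(kf.x, x_before)   # filter state unchanged

kf.update(1.2)                          # commit the same measurement
assert np.allclose(kf.x, x_new)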
Example #7
    def __init__(self, dim_x, dim_z, dim_u=0, compute_log_likelihood=True):
        if dim_x < 1:
            raise ValueError('dim_x must be 1 or greater')
        if dim_z < 1:
            raise ValueError('dim_z must be 1 or greater')
        if dim_u < 0:
            raise ValueError('dim_u must be 0 or greater')

        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        self.x = zeros((dim_x, 1))  # state
        self.P = eye(dim_x)  # uncertainty covariance
        self.Q = eye(dim_x)  # process uncertainty
        self.B = None  # control transition matrix
        self.F = eye(dim_x)  # state transition matrix
        self.H = zeros((dim_z, dim_x))  # Measurement function
        self.R = eye(dim_z)  # state uncertainty
        self._alpha_sq = 1.  # fading memory control
        self.M = np.zeros(
            (dim_z, dim_z))  # process-measurement cross correlation

        self.z = reshape_z(zeros((dim_z)), self.dim_z, self.x.ndim)

        # gain and residual are computed during the innovation step. We
        # save them so that in case you want to inspect them for various
        # purposes
        self.K = np.zeros((dim_x, dim_z))  # kalman gain
        self.y = zeros((dim_z, 1))
        self.S = np.zeros((dim_z, dim_z))  # system uncertainty

        # identity matrix. Do not alter this.
        self._I = np.eye(dim_x)

        # these will always be a copy of x,P after predict() is called
        self.x_prior = np.copy(self.x)
        self.P_prior = np.copy(self.P)

        self.compute_log_likelihood = compute_log_likelihood
        self.log_likelihood = math.log(sys.float_info.min)
        self.likelihood = sys.float_info.min

        self.inv = np.linalg.inv
Example #8
    def __init__(self, dim_x, dim_z, dim_u=0):
        
        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        self.x = zeros((dim_x, 1)) # state
        # uncertainty covariance, U-D factored: P = U diag(D) U'
        self.U = eye(dim_x)
        self.D = ones((dim_x))
        self.B = 0                 # control transition matrix
        self.F = np.eye(dim_x)     # state transition matrix
        # state uncertainty, U-D factored
        self.Dm = eye(dim_z)       # decorrelation matrix
        self.Ur = eye(dim_z)
        self.Dr = ones((dim_z))
        # process uncertainty, U-D factored
        self.Uq = eye(dim_x)
        self.Dq = ones((dim_x))
        
        z = np.array([None]*self.dim_z)
        self.z = reshape_z(z, self.dim_z, self.x.ndim)
        
        # residual is computed during the innovation step. We
        # save them so that in case you want to inspect it for various
        # purposes
        self.y = zeros((dim_z, 1)) # residual
        self.S = np.zeros((dim_z, dim_z))   # system uncertainty
        self.SI = np.zeros((dim_z, dim_z))  # inverse system uncertainty
        
        self._log_likelihood = log(sys.float_info.min)
        self._likelihood = sys.float_info.min
        self._mahalanobis = None

        # these will always be a copy of x, U, D after predict() is called
        self.x_prior = self.x.copy()
        self.U_prior = self.U.copy()
        self.D_prior = self.D.copy()

        # these will always be a copy of x, U, D after update() is called
        self.x_post = self.x.copy()
        self.U_post = self.U.copy()
        self.D_post = self.D.copy()
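Example #8 carries the covariance in U-D factored form rather than as P directly. A small sketch of the factorization the U and D attributes represent (values illustrative):

import numpy as np

U = np.array([[1.0, 0.5],      # unit upper triangular
              [0.0, 1.0]])
D = np.array([2.0, 1.0])       # diagonal factor, stored as a vector

P = U @ np.diag(D) @ U.T       # the covariance these factors encode
print(P)   # [[2.25 0.5 ] [0.5  1.  ]]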
Example #9
    def __init__(self, dim_x, dim_z, dim_u=0):

        self.dim_x = dim_x
        self.dim_z = dim_z
        self.dim_u = dim_u

        self.x = zeros((dim_x, 1)) # state
        self.P = eye(dim_x)        # uncertainty covariance
        self.B = 0                 # control transition matrix
        self.F = np.eye(dim_x)     # state transition matrix
        self.R = eye(dim_z)        # state uncertainty
        self.Q = eye(dim_x)        # process uncertainty

        z = np.array([None]*self.dim_z)
        self.z = reshape_z(z, self.dim_z, self.x.ndim)

        # gain and residual are computed during the innovation step. We
        # save them so that in case you want to inspect them for various
        # purposes
        self.K = np.zeros((dim_x, dim_z)) # kalman gain
        self.y = zeros((dim_z, 1))
        self.S = np.zeros((dim_z, dim_z))   # system uncertainty
        self.SI = np.zeros((dim_z, dim_z))  # inverse system uncertainty

        # identity matrix. Do not alter this.
        self._I = np.eye(dim_x)

        self._log_likelihood = log(sys.float_info.min)
        self._likelihood = sys.float_info.min
        self._mahalanobis = None

        # these will always be a copy of x,P after predict() is called
        self.x_prior = self.x.copy()
        self.P_prior = self.P.copy()

        # these will always be a copy of x,P after update() is called
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
Example #10
    def find_likelihood(self, z, R=None, H=None):
        if np.all(np.isnan(z)):
            x_mean_velocity = np.mean(self.velocity_x[-5:])
            y_mean_velocity = np.mean(self.velocity_y[-5:])

            z_noise = np.random.normal(loc=0, scale=0, size=2)
            z = list(
                self.x[0:2] - [self.velocity_x[-1], self.velocity_y[-1]] + [x_mean_velocity, y_mean_velocity] + z_noise)
        z = reshape_z(z, self.dim_z, self.x.ndim)
        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R
        if H is None:
            H = self.H
        y = z - dot(H, self.x)
        PHT = dot(self.P, H.T)
        S = dot(H, PHT) + R
        _log_likelihood = logpdf(x=y, cov=S)

        _likelihood = exp(_log_likelihood)
        if _likelihood == 0:
            _likelihood = sys.float_info.min
        return _likelihood
Example #11
def update(self, z, R=None, H=None):
    """
        Add a new measurement (z) to the Kalman filter.
        If z is None, nothing is computed. However, x_post and P_post are
        updated with the prior (x_prior, P_prior), and self.z is set to None.
        Parameters
        ----------
        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.
            If you pass in a value of H, z must be a column vector of the
            correct size.
        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.
        H : np.array, or None
            Optionally provide H to override the measurement function for this
            one call, otherwise self.H will be used.
        """

    # set to None to force recompute
    self._log_likelihood = None
    self._likelihood = None
    self._mahalanobis = None

    if z is None:
        self.z = np.array([[None] * self.dim_z]).T
        self.x_post = self.x.copy()
        self.P_post = self.P.copy()
        self.y = zeros((self.dim_z, 1))
        return

    if R is None:
        R = self.R
    elif isscalar(R):
        R = eye(self.dim_z) * R

    if H is None:
        z = reshape_z(z, self.dim_z, self.x.ndim)
        H = self.H

    # y = z - Hx
    # error (residual) between measurement and prediction
    self.y = z - dot(H, self.x)

    # common subexpression for speed
    PHT = dot(self.P, H.T)

    # S = HPH' + R
    # project system uncertainty into measurement space
    self.S = dot(H, PHT) + R
    self.SI = self.inv(self.S)
    # K = PH'inv(S)
    # map system uncertainty into kalman gain
    self.K = dot(PHT, self.SI)

    # x = x + Ky
    # predict new x with residual scaled by the kalman gain
    self.x = self.x + dot(self.K, self.y)

    # P = (I-KH)P(I-KH)' + KRK'
    # This is more numerically stable
    # and works for non-optimal K vs the equation
    # P = (I-KH)P usually seen in the literature.

    I_KH = self._I - dot(self.K, H)
    self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)

    # save measurement and posterior state
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self.P_post = self.P.copy()
Example #12
    def update_correlated(self, z, R=None, H=None):
        """ Add a new measurement (z) to the Kalman filter assuming that
        process noise and measurement noise are correlated as defined in
        the `self.M` matrix.

        If z is None, nothing is changed.

        Parameters
        ----------
        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array,  or None
            Optionally provide H to override the measurement function for this
            one call, otherwise  self.H will be used.
        """

        if z is None:
            return

        z = reshape_z(z, self.dim_z, self.x.ndim)

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if self.x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, self.x)

        # common subexpression for speed
        PHT = dot(self.P, H.T)

        # project system uncertainty into measurement space
        self.S = dot(H, PHT) + dot(H, self.M) + dot(self.M.T, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(PHT + self.M, self.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot(self.K, dot(H, self.P) + self.M.T)

        self.z = np.copy(z)  # save the measurement

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
Example #13
    def update_steadystate(self, z):
        """
        Add a new measurement (z) to the Kalman filter without recomputing
        the Kalman gain K, the state covariance P, or the system
        uncertainty S.

        You can use this for LTI systems since the Kalman gain and covariance
        converge to a fixed value. Precompute these and assign them explicitly,
        or run the Kalman filter using the normal predict()/update() cycle
        until they converge.

        The main advantage of this call is speed. We do significantly less
        computation, notably avoiding a costly matrix inversion.

        Use in conjunction with predict_steadystate(), otherwise P will grow
        without bound.

        Parameters
        ----------
        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.


        Examples
        --------
        >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
        >>> # let filter converge on representative data, then save k and P
        >>> for i in range(100):
        >>>     cv.predict()
        >>>     cv.update([i, i, i])
        >>> saved_K = np.copy(cv.K)
        >>> saved_P = np.copy(cv.P)

        later on:

        >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
        >>> cv.K = np.copy(saved_K)
        >>> cv.P = np.copy(saved_P)
        >>> for i in range(100):
        >>>     cv.predict_steadystate()
        >>>     cv.update_steadystate([i, i, i])
        """

        if z is None:
            return

        z = reshape_z(z, self.dim_z, self.x.ndim)

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(self.H, self.x)

        if self.compute_log_likelihood:
            S = dot(dot(self.H, self.P), self.H.T) + self.R

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)

        self.z = np.copy(z)  # save the measurement

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=S)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
Example #14
    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for this
            one call, otherwise self.H will be used.
        """

        if z is None:
            return

        z = reshape_z(z, self.dim_z, self.x.ndim)

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        if H is None:
            H = self.H

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, self.x)

        # common subexpression for speed
        PHT = dot(self.P, H.T)

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot(H, PHT) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(PHT, self.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK'
        # This is more numerically stable
        # and works for non-optimal K vs the equation
        # P = (I-KH)P usually seen in the literature.

        I_KH = self._I - dot(self.K, H)
        self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)

        self.z = np.copy(z)  # save the measurement

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
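The Joseph-form comment above can be checked numerically: the long form stays symmetric for any gain, while the short form (I-KH)P generally does not once K is not the optimal gain. A small numpy-only sketch:

import numpy as np

P = np.array([[2.0, 0.3],
              [0.3, 1.0]])
H = np.array([[1.0, 0.0]])
R = np.array([[0.5]])
K = np.array([[0.9], [0.3]])   # an arbitrary, non-optimal gain

I_KH = np.eye(2) - K @ H
P_joseph = I_KH @ P @ I_KH.T + K @ R @ K.T
P_short = I_KH @ P

print(np.allclose(P_joseph, P_joseph.T))   # True: symmetric by construction
print(np.allclose(P_short, P_short.T))     # False: symmetry is lost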
Example #15
def update(x, P, z, R, H=None, return_all=False):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.

    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    update(1, 2, 1, 1, 1)  # univariate
    update(x, P, z, R)

    Parameters
    ----------

    x : numpy.array(dim_x, 1), or float
        State estimate vector

    P : numpy.array(dim_x, dim_x), or float
        Covariance matrix

    z : (dim_z, 1): array_like
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.

    R : numpy.array(dim_z, dim_z), or float
        Measurement noise matrix

    H : numpy.array(dim_x, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    return_all : bool, default False
        If true, y, K, S, and log_likelihood are returned, otherwise
        only x and P are returned.

    Returns
    -------

    x : numpy.array
        Posterior state estimate vector

    P : numpy.array
        Posterior covariance matrix

    y : numpy.array or scalar
        Residual. Difference between measurement and state in measurement space

    K : numpy.array
        Kalman gain

    S : numpy.array
        System uncertainty in measurement space

    log_likelihood : float
        log likelihood of the measurement
    """

    #pylint: disable=bare-except

    if z is None:
        if return_all:
            return x, P, None, None, None, None
        return x, P

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    Hx = np.atleast_1d(dot(H, x))
    z = reshape_z(z, Hx.shape[0], x.ndim)

    # error (residual) between measurement and prediction
    y = z - Hx

    # project system uncertainty into measurement space
    S = dot(dot(H, P), H.T) + R

    # map system uncertainty into kalman gain
    try:
        K = dot(dot(P, H.T), linalg.inv(S))
    except:
        # can't invert a 1D array, annoyingly
        K = dot(dot(P, H.T), 1. / S)

    # predict new x with residual scaled by the kalman gain
    x = x + dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'
    KH = dot(K, H)

    try:
        I_KH = np.eye(KH.shape[0]) - KH
    except:
        I_KH = np.array([1 - KH])
    P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)

    if return_all:
        # compute log likelihood
        log_likelihood = logpdf(z, dot(H, x), S)
        return x, P, y, K, S, log_likelihood
    return x, P
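A hypothetical call of the procedural update() above for a single-state filter measuring the state directly (H defaults to 1):

import numpy as np

x = np.array([0.])    # weak prior on the state
P = 500.              # large prior uncertainty

x, P = update(x, P, z=10., R=5.)
print(x, P)           # x pulled almost all the way to the measurement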
Example #16
    def update(self, z, R=None, H=None, timestamp=-1):
        # print(self.id, " update !")
        if timestamp == -1:
            warnings.warn(
                "timestamp not specified: default time increment will be used")

            # timestep set to last time difference between subsequent measurements
            self.dt = DEFAULT_TIME_INCREMENT
            self.prev_ti = self.curr_ti
            self.curr_ti += DEFAULT_TIME_INCREMENT

        else:
            # timestep set to the difference between subsequent measurements
            self.prev_ti = self.curr_ti
            self.curr_ti = timestamp
            self.dt = timestamp - self.prev_ti

        # set to None to force recompute
        self._log_likelihood = None
        self._likelihood = None
        self._mahalanobis = None

        # the None guard must run before z is used below
        if z is None:
            self.z = np.array([[None] * self.dim_z]).T
            self.x_post = self.x.copy()
            self.P_post = self.P.copy()
            self.y = zeros((self.dim_z, 1))
            return

        # data validation: gate the measurement on its normalized innovation
        # squared against the prior
        v = z - dot(self.H, self.x_prior)
        S = self.R + dot(self.H, dot(self.P_prior, self.H.T))
        validation_region = dot(v.T, dot(self.inv(S), v))
        # print("nodal validation region = ", validation_region)
        if validation_region >= self.NODAL_VALIDATION_BOUND:
            print("rejected nodal validation region:", validation_region)
            return True

        z = reshape_z(z, self.dim_z, self.x.ndim)
        self.z_current = z

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        if H is None:
            H = self.H

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, self.x)

        # inverse of R, used in information form of variance update and Kalman gain
        RI = self.inv(R)

        # information form of variance update equation
        # PI = PI + HT*RI*H
        self.PI = self.PI + dot(dot(H.T, RI), H)
        self.PI_post = self.PI.copy()

        # needed for Kalman gain matrix
        self.P = self.inv(self.PI)

        # Kalman gain matrix
        # W = P*HT*RI
        self.W = dot(dot(self.P, H.T), RI)

        # x = x + Wy
        self.x = self.x + dot(self.W, self.y)

        # update x_post & P_post
        self.x_post = self.x
        self.P_post = self.P
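The gating statistic v' inv(S) v in Example #16 is chi-square distributed with dim_z degrees of freedom under the model, so NODAL_VALIDATION_BOUND is commonly chosen as a chi-square quantile. A sketch of that choice (scipy assumed; the source does not say how the bound is actually picked):

from scipy.stats import chi2

dim_z = 2                     # measurement dimension (illustrative)
gate_probability = 0.99       # accept 99% of true measurements

NODAL_VALIDATION_BOUND = chi2.ppf(gate_probability, df=dim_z)
print(NODAL_VALIDATION_BOUND)   # ~9.21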