Code Example #1
File: EKF.py Project: Filos0f/SIS-assignment
    def predict_update(self, z, HJacobian, Hx, args=(), hx_args=(), u=0):
        if not isinstance(args, tuple):
            args = (args, )

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        F = self._F
        B = self._B
        P = self._P
        Q = self._Q
        R = self._R
        x = self._x

        H = HJacobian(x, *args)

        # predict step
        x = dot(F, x) + dot(B, u)
        P = dot3(F, P, F.T) + Q

        # update step
        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv(S))
        self._K = K

        self._x = x + dot(K, (z - Hx(x, *hx_args)))

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
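
Usage sketch (not from the project above): a minimal call of a filterpy-style ExtendedKalmanFilter.predict_update for a 2-state constant-velocity model with a scalar position measurement. The HJacobian/hx functions and every numeric value are made up, and a recent filterpy release is assumed.

import numpy as np
from filterpy.kalman import ExtendedKalmanFilter

def HJacobian(x):
    # Jacobian of the measurement function: we observe position only
    return np.array([[1., 0.]])

def hx(x):
    # measurement function: expected measurement for state x
    return np.array([x[0]])

ekf = ExtendedKalmanFilter(dim_x=2, dim_z=1)
ekf.x = np.array([0., 1.])                  # position, velocity
ekf.F = np.array([[1., 1.], [0., 1.]])      # constant-velocity transition
ekf.R *= 0.5
ekf.Q *= 0.01

for z in [1.1, 2.0, 2.9, 4.2]:
    ekf.predict_update(z, HJacobian, hx)
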
Code Example #2
File: kalman_filter.py Project: feixh/filterpy
def rts_smoother(Xs, Ps, Fs, Qs):
    """ Runs the Rauch-Tung-Striebal Kalman smoother on a set of
    means and covariances computed by a Kalman filter. The usual input
    would come from the output of `KalmanFilter.batch_filter()`.

    **Parameters**

    Xs : numpy.array
       array of the means (state variable x) of the output of a Kalman
       filter.

    Ps : numpy.array
        array of the covariances of the output of a kalman filter.

    Fs : list-like collection of numpy.array
        State transition matrix of the Kalman filter at each time step.

    Qs : list-like collection of numpy.array
        Process noise of the Kalman filter at each time step.

    **Returns**

    'x' : numpy.ndarray
       smoothed means

    'P' : numpy.ndarray
       smoothed state covariances

    'K' : numpy.ndarray
        smoother gain at each step


    **Example**::

        zs = [t + random.randn()*4 for t in range(40)]

        (mu, cov, _, _) = kalman.batch_filter(zs)
        (x, P, K) = rts_smoother(mu, cov, [kalman.F]*len(zs), [kalman.Q]*len(zs))
    """

    assert len(Xs) == len(Ps)
    n = Xs.shape[0]
    dim_x = Xs.shape[1]

    # smoother gain
    K = zeros((n,dim_x,dim_x))
    x, P = Xs.copy(), Ps.copy()

    for k in range(n-2,-1,-1):
        P_pred = dot3(Fs[k], P[k], Fs[k].T) + Qs[k]

        K[k]  = dot3(P[k], Fs[k].T, linalg.inv(P_pred))
        x[k] += dot (K[k], x[k+1] - dot(Fs[k], x[k]))
        P[k] += dot3 (K[k], P[k+1] - P_pred, K[k].T)

    return (x, P, K)
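
End-to-end sketch (illustrative only): filter a noisy ramp with a filterpy KalmanFilter and then smooth it. All model values are invented, and since the return arity of rts_smoother differs between versions (some also return the predicted covariances), only the first three results are unpacked.

import numpy as np
from filterpy.kalman import KalmanFilter

kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([0., 0.])                   # position, velocity
kf.F = np.array([[1., 1.], [0., 1.]])       # constant-velocity model
kf.H = np.array([[1., 0.]])                 # measure position only
kf.P *= 10.
kf.R *= 4.
kf.Q *= 0.01

zs = [t + np.random.randn() * 2 for t in range(40)]
mu, cov, _, _ = kf.batch_filter(zs)
result = kf.rts_smoother(mu, cov)           # (x, P, K) or (x, P, K, Pp)
x_smooth, P_smooth, K_gain = result[0], result[1], result[2]
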
Code Example #3
File: EKF_rako.py Project: leviscar/kalman_rako
    def update(self, Xsim, uwb_dis):
        H = self.H_jaccacu(Xsim[0:2], Xsim[2:4])
        S = dot3(H, self.P, H.T) + self.R
        self.K = dot3(self.P, H.T, linalg.inv(S))
        # in fact Xsim is consistent with pos_imu; both are obtained from the IMU data
        residual = self.Z_fcacu(Xsim, uwb_dis) - dot(H, Xsim)
        Xesti = Xsim + self.K * residual
        I_KH = np.eye(4) - dot(self.K, H)
        self.P = dot(I_KH, self.P)
        # filterpy uses an alternative formula for computing the covariance:
        # self.P = dot3(I_KH, self.P, I_KH.T) + dot3(self.K, self.R, self.K.T)
        self.XLastEsti = Xesti
        return Xesti
Code Example #4
File: kalman_filter.py Project: PepSalehi/filterpy
    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        **Parameters**

        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for
            this one call, otherwise self.H will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        P = self._P
        x = self._x

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(H, x)

        # S = HPH' + R
        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot3(P, H.T, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

        self._S = S
        self._K = K
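
For reference, the same update equations as a standalone NumPy sketch with made-up 2-state / 1-measurement values; the Joseph form on the last line is what keeps P symmetric and positive semi-definite.

import numpy as np

x = np.array([0., 1.])                      # state estimate
P = np.eye(2) * 10.                         # state covariance
H = np.array([[1., 0.]])                    # measurement function
R = np.array([[4.]])                        # measurement noise
z = np.array([1.2])                         # measurement

y = z - H @ x                               # residual
S = H @ P @ H.T + R                         # innovation covariance
K = P @ H.T @ np.linalg.inv(S)              # Kalman gain
x = x + K @ y
I_KH = np.eye(2) - K @ H
P = I_KH @ P @ I_KH.T + K @ R @ K.T         # Joseph form
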
Code Example #5
File: kalman_filter.py Project: ytaleb17/filterpy
    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        **Parameters**

        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for
            this one call, otherwise self.H will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        P = self._P
        x = self._x

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(H, x)

        # S = HPH' + R
        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot3(P, H.T, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

        self._S = S
        self._K = K
Code Example #6
    def update(self, z, R=None, H=None):

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        P = self.P
        x = self.x

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim==1 and shape(z) == (1,1):
            z = z[0]

        if shape(z) == (): # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        Hx = dot(H, x)

        assert shape(Hx) == shape(z) or (shape(Hx) == (1,1) and shape(z) == (1,)), \
               'shape of z should be {}, but it is {}'.format(
               shape(Hx), shape(z))
        self.y = z - Hx

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot3(H, P, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot3(P, H.T, linalg.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self.I - dot(self.K, H)
        self.P = dot3(I_KH, P, I_KH.T) + dot3(self.K, R, self.K.T)

        self.log_likelihood = logpdf(z, dot(H, x), self.S)
Code Example #7
    def update(self, z, R_inv=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R_inv : np.array, scalar, or None
            Optionally provide the inverse of the measurement noise for this
            one call, otherwise self.R_inv will be used.
        """

        if z is None:
            return

        if R_inv is None:
            R_inv = self.R_inv
        elif np.isscalar(R_inv):
            R_inv = eye(self.dim_z) * R_inv

        # rename for readability and a tiny extra bit of speed
        H = self.H
        H_T = H.T
        P_inv = self.P_inv
        x = self.x

        if self._no_information:
            self.x = dot(P_inv, x) + dot3(H_T, R_inv, z)
            self.P_inv = P_inv + dot3(H_T, R_inv, H)

        else:
            # y = z - Hx
            # error (residual) between measurement and prediction
            self.y = z - dot(H, x)

            # S = HPH' + R
            # project system uncertainty into measurement space
            self.S = P_inv + dot(H_T, R_inv).dot(H)
            self.K = dot3(inv(self.S), H_T, R_inv)

            # x = x + Ky
            # predict new x with residual scaled by the kalman gain
            self.x = x + dot(self.K, self.y)
            self.P_inv = P_inv + dot3(H_T, R_inv, H)
Code Example #8
File: UKF.py Project: EJHortala/books-2
    def update(self, z, R=None, UT=None, hx_args=()):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work - you can use x_mean_fn and z_mean_fn to alter the behavior
            of the unscented transform.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for i in range(self._num_sigmas):
            self.sigmas_h[i] = self.hx(self.sigmas_f[i], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean,
                    self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            dx = self.residual_x(self.sigmas_f[i], self.x)
            dz = self.residual_z(self.sigmas_h[i], zp)
            Pxz += self.Wc[i] * outer(dx, dz)

        self.K = dot(Pxz, inv(Pz))  # Kalman gain
        self.y = self.residual_z(z, zp)  # residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot3(self.K, Pz, self.K.T)

        self.log_likelihood = logpdf(self.y, np.zeros(len(self.y)), Pz)
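
Usage sketch for an unscented filter of this style, assuming a recent filterpy release (the snippets above come from older forks whose constructors differ); fx, hx and the sigma-point parameters below are illustrative.

import numpy as np
from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints

def fx(x, dt):
    # constant-velocity state transition
    return np.array([x[0] + dt * x[1], x[1]])

def hx(x):
    # measure position only
    return np.array([x[0]])

points = MerweScaledSigmaPoints(n=2, alpha=0.1, beta=2., kappa=0.)
ukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=1., hx=hx, fx=fx, points=points)
ukf.x = np.array([0., 1.])
ukf.P *= 10.
ukf.R *= 0.5
ukf.Q *= 0.01

for z in [1.0, 2.1, 2.9, 4.2]:
    ukf.predict()
    ukf.update(np.array([z]))
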
Code Example #9
File: UKF.py Project: CeasarSS/books
    def update(self, z, R=None, UT=None, hx_args=()):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work - you can use x_mean_fn and z_mean_fn to alter the behavior
            of the unscented transform.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args,)

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for i in range(self._num_sigmas):
            self.sigmas_h[i] = self.hx(self.sigmas_f[i], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean, self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            dx = self.residual_x(self.sigmas_f[i], self.x)
            dz =  self.residual_z(self.sigmas_h[i], zp)
            Pxz += self.Wc[i] * outer(dx, dz)


        self.K = dot(Pxz, inv(Pz))        # Kalman gain
        self.y = self.residual_z(z, zp)   # residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot3(self.K, Pz, self.K.T)

        self.log_likelihood = logpdf(self.y, np.zeros(len(self.y)), Pz)
Code Example #10
    def update(self, z, R=None, UT=None, hx_args=()):
        if z is None:
            return
        if not isinstance(hx_args, tuple):
            hx_args = (hx_args,)
        if UT is None:
            UT = unscented_transform
        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = np.eye(self._dim_z) * R

        for i in range(self._num_sigmas):
            self.sigmas_h[i] = self.hx(self.sigmas_f[i], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean, self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = np.zeros((self.P.shape[0], self._dim_z))  # Changed
        for i in range(self._num_sigmas):
            dx = self.residual_x(self.sigmas_f[i], self.x)
            dz =  self.residual_z(self.sigmas_h[i], zp)
            Pxz += self.Wc[i] * np.outer(dx, dz)

        K = np.dot(Pxz, scipy.linalg.inv(Pz))   # Kalman gain
        y = self.residual_z(z, zp)   #residual
        self.x = state_covcol_diff(self.x, -np.dot(K, y))  # Changed
        self._P = self.P - dot3(K, Pz, K.T)
Code Example #11
File: information_filter.py Project: CeasarSS/books
    def update(self, z, R_inv=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R_inv : np.array, scalar, or None
            Optionally provide the inverse of the measurement noise for this
            one call, otherwise the filter's stored R_inv will be used.
        """

        if z is None:
            return

        if R_inv is None:
            R_inv = self._R_inv
        elif np.isscalar(R_inv):
            R_inv = eye(self.dim_z) * R_inv

        # rename for readability and a tiny extra bit of speed
        H = self._H
        H_T = H.T
        P_inv = self._P_inv
        x = self._x

        if self._no_information:
            self._x = dot(P_inv, x) + dot3(H_T, R_inv, z)
            self._P_inv = P_inv + dot3(H_T, R_inv, H)

        else:       # y = z - Hx
            # error (residual) between measurement and prediction
            self._y = z - dot(H, x)

            # S = HPH' + R
            # project system uncertainty into measurement space
            self._S = P_inv + dot(H_T, R_inv).dot (H)
            self._K = dot3(inv(self._S), H_T, R_inv)

            # x = x + Ky
            # predict new x with residual scaled by the kalman gain
            self._x = x + dot(self._K, self._y)
            self._P_inv = P_inv + dot3(H_T, R_inv, H)
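
A standalone NumPy check, with made-up values, that the information-form update used above agrees with the conventional Kalman update (a consequence of the matrix inversion lemma).

import numpy as np

x = np.array([0., 1.])
P = np.array([[4., 1.], [1., 2.]])
H = np.array([[1., 0.]])
R = np.array([[0.5]])
z = np.array([1.3])

P_inv, R_inv = np.linalg.inv(P), np.linalg.inv(R)

# information-form update
S = P_inv + H.T @ R_inv @ H
K_info = np.linalg.inv(S) @ H.T @ R_inv
x_info = x + K_info @ (z - H @ x)
P_info = np.linalg.inv(S)

# conventional update
K = P @ H.T @ np.linalg.inv(H @ P @ H.T + R)
x_conv = x + K @ (z - H @ x)
P_conv = (np.eye(2) - K @ H) @ P

assert np.allclose(x_info, x_conv) and np.allclose(P_info, P_conv)
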
Code Example #12
File: EKF.py Project: ytaleb17/filterpy
    def predict_update(self, z, HJacobian, Hx, u=0):
        """ Performs the predict/update innovation of the extended Kalman
        filter.

        **Parameters**

        z : np.array
            measurement for this step.
            If `None`, only the predict step is performed.

        HJacobian : function
           function which computes the Jacobian of the H matrix (measurement
           function). Takes state variable (self.x) as input, returns H.


        Hx : function
            function which takes a state variable and returns the measurement
            that would correspond to that state.

        u : np.array or scalar
            optional control vector input to the filter.
        """
        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        F = self._F
        B = self._B
        P = self._P
        Q = self._Q
        R = self._R
        x = self._x

        H = HJacobian(x)

        # predict step
        x = dot(F, x) + dot(B, u)
        P = dot3(F, P, F.T) + Q

        # update step
        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv(S))

        self._x = x + dot(K, (z - Hx(x)))

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
Code Example #13
File: EKF.py Project: PepSalehi/filterpy
    def predict_update(self, z, HJacobian, Hx, u=0):
        """ Performs the predict/update innovation of the extended Kalman
        filter.

        **Parameters**

        z : np.array
            measurement for this step.
            If `None`, only the predict step is performed.

        HJacobian : function
           function which computes the Jacobian of the H matrix (measurement
           function). Takes state variable (self.x) as input, returns H.


        Hx : function
            function which takes a state variable and returns the measurement
            that would correspond to that state.

        u : np.array or scalar
            optional control vector input to the filter.
        """
        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        F = self._F
        B = self._B
        P = self._P
        Q = self._Q
        R = self._R
        x = self._x

        H = HJacobian(x)

        # predict step
        x = dot(F, x) + dot(B, u)
        P = dot3(F, P, F.T) + Q

        # update step
        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv (S))

        self._x = x + dot(K, (z - Hx(x)))

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
Code Example #14
File: imm.py Project: weiweikong/filterpy
    def update(self, z):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        **Parameters**

        z : np.array
            measurement for this update.
        """

        L = zeros(len(self.filters))
        for i, f in enumerate(self.filters):
            f.update(z)
            L[i] = f.likelihood  # prior * likelihood

        # compute mode probabilities for this step
        self.mu = self.cbar * L
        self.mu /= sum(self.mu)  # normalize

        # compute mixed IMM state and covariance
        self.x.fill(0.0)
        self.P.fill(0.0)

        for f, w in zip(self.filters, self.mu):
            self.x += f.x * w

        for f, w in zip(self.filters, self.mu):
            y = f.x - self.x
            self.P += w * (np.outer(y, y) + f.P)

        # initial condition IMM state, covariance
        xs, Ps = [], []
        self.cbar = dot(self.M.T, self.mu)

        omega = np.zeros((self.n, self.n))
        for i in range(self.n):
            omega[i, 0] = self.M[i, 0] * self.mu[i] / self.cbar[0]
            omega[i, 1] = self.M[i, 1] * self.mu[i] / self.cbar[1]

        # compute initial states
        for i, (f, w) in enumerate(zip(self.filters, omega.T)):
            x = np.zeros(self.x.shape)
            for kf, wj in zip(self.filters, w):
                x += kf.x * wj
            xs.append(x)

            P = np.zeros(self.P.shape)
            for kf, wj in zip(self.filters, w):
                y = kf.x - x
                P += wj * (np.outer(y, y) + kf.P)
            Ps.append(P)

        for i in range(len(xs)):
            f = self.filters[i]
            # propagate using the mixed state estimate and covariance
            f.x = dot(f.F, xs[i])
            f.P = dot3(f.F, Ps[i], f.F.T) + f.Q
Code Example #15
File: imm.py Project: mouradmourafiq/filterpy
    def update(self, z):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        **Parameters**

        z : np.array
            measurement for this update.
        """

        L = zeros(len(self.filters))
        for i, f in enumerate(self.filters):
            f.update(z)
            L[i] = f.likelihood  # prior * likelihood

        # compute mode probabilities for this step
        self.mu = self.cbar * L
        self.mu /= sum(self.mu)  # normalize

        # compute mixed IMM state and covariance
        self.x.fill(0.)
        self.P.fill(0.)

        for f, w in zip(self.filters, self.mu):
            self.x += f.x * w

        for f, w in zip(self.filters, self.mu):
            y = f.x - self.x
            self.P += w * (np.outer(y, y) + f.P)

        # initial condition IMM state, covariance
        xs, Ps = [], []
        self.cbar = dot(self.M.T, self.mu)

        omega = np.zeros((self.n, self.n))
        for i in range(self.n):
            omega[i, 0] = self.M[i, 0] * self.mu[i] / self.cbar[0]
            omega[i, 1] = self.M[i, 1] * self.mu[i] / self.cbar[1]

        # compute initial states
        for i, (f, w) in enumerate(zip(self.filters, omega.T)):
            x = np.zeros(self.x.shape)
            for kf, wj in zip(self.filters, w):
                x += kf.x * wj
            xs.append(x)

            P = np.zeros(self.P.shape)
            for kf, wj in zip(self.filters, w):
                y = kf.x - x
                P += wj * (np.outer(y, y) + kf.P)
            Ps.append(P)

        for i in range(len(xs)):
            f = self.filters[i]
            # propagate using the mixed state estimate and covariance
            f.x = dot(f.F, xs[i])
            f.P = dot3(f.F, Ps[i], f.F.T) + f.Q
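
Usage sketch for an IMM of this style, assuming a recent filterpy release in which the mixing and re-initialization shown at the end of update() is done by a separate predict() call; the two constant-velocity filters and the transition matrix M are invented.

import numpy as np
from filterpy.kalman import KalmanFilter, IMMEstimator

def make_cv_filter(q):
    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = np.array([[0.], [0.]])
    kf.F = np.array([[1., 1.], [0., 1.]])
    kf.H = np.array([[1., 0.]])
    kf.P *= 10.
    kf.R *= 4.
    kf.Q = np.eye(2) * q
    return kf

filters = [make_cv_filter(0.001), make_cv_filter(0.5)]   # low / high process noise
mu = np.array([0.5, 0.5])                                # initial mode probabilities
M = np.array([[0.95, 0.05], [0.05, 0.95]])               # mode transition matrix
imm = IMMEstimator(filters, mu, M)

for z in [1.0, 2.1, 3.2, 4.0]:
    imm.predict()
    imm.update(np.array([z]))
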
Code Example #16
    def update(self, Z):
        """
        Add a new measurement (Z) to the kalman filter. If Z is None, nothing
        is changed.

        Parameters
        ----------

        Z : np.array
            measurement for this update.
        """

        if Z is None:
            return

        # rename for readability and a tiny extra bit of speed
        I = self._I
        gamma = self.gamma
        Q = self._Q
        H = self._H
        P = self._P
        x = self._x
        V_inv = self._V_inv
        F = self._F
        W = self._W

        # common subexpression H.T * V^-1
        HTVI = dot(H.T, V_inv)

        L = linalg.inv(I - gamma*dot(Q, P) + dot3(HTVI, H, P))

        #common subexpression P*L
        PL = dot(P,L)

        K = dot3(F, PL, HTVI)

        self.residual = Z - dot(H, x)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = self._x + dot(K, self.residual)
        self._P = dot3(F, PL, F.T) + W

        # force P to be symmetric
        self._P = (self._P + self._P.T) / 2
Code Example #17
File: test_lsq.py Project: PacoSpike/filterpy
    def update(self, Z):
        self.x += 1
        self.k += 1
        print("k=", self.k, 1 / self.k, 1 / (self.k + 1))

        S = dot3(self.H, self.P, self.H.T) + self.R
        K1 = dot3(self.P, self.H.T, inv(S))
        # K1 = dot3(self.P, self.H.T, inv(self.R))

        print("K1=", K1[0, 0])
        # print(K)

        I_KH = self.I - dot(K1, self.H)
        y = Z - dot(self.H, self.x)
        print("y=", y)
        self.x = self.x + dot(K1, y)
        self.P = dot(I_KH, self.P)
        print(self.P)
Code Example #18
File: test_lsq.py Project: sauravrt/filterpy
    def update(self, Z):
        self.x += 1
        self.k += 1
        print('k=', self.k, 1 / self.k, 1 / (self.k + 1))

        S = dot3(self.H, self.P, self.H.T) + self.R
        K1 = dot3(self.P, self.H.T, inv(S))
        #K1 = dot3(self.P, self.H.T, inv(self.R))

        print('K1=', K1[0, 0])
        #print(K)

        I_KH = self.I - dot(K1, self.H)
        y = Z - dot(self.H, self.x)
        print('y=', y)
        self.x = self.x + dot(K1, y)
        self.P = dot(I_KH, self.P)
        print(self.P)
Code Example #19
    def update(self, z, residual=np.subtract, UT=None):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        **Parameters**

        z : numpy.array of shape (dim_z)
            measurement vector

        residual : function (z, z2), optional
            Optional function that computes the residual (difference) between
            the two measurement vectors. If you do not provide this, then the
            built in minus operator will be used. You will normally want to use
            the built in unless your residual computation is nonlinear (for
            example, if they are angles)

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work, but if for example you are using angles the default method
            of computing means and residuals will not work, and you will have
            to define how to compute it.
        """

        # rename for readability
        sigmas_f = self.sigmas_f
        sigmas_h = self.sigmas_h
        Wm = self.Wm
        Wc = self.Wc
        if UT is None:
            UT = unscented_transform

        # transform sigma points into measurement space
        sigmas_h2 = self.hx(sigmas_f)

        for i in range(self._num_sigmas):
            sigmas_h[i] = self.hx(sigmas_f[i])

        assert np.allclose(sigmas_h2, sigmas_h)

        # mean and covariance of prediction passed through UT
        zp, Pz = UT(sigmas_h, Wm, Wc, self.R)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            Pxz += Wc[i] * np.outer(sigmas_f[i] - self.xp,
                                    residual(sigmas_h[i], zp))

        K = dot(Pxz, inv(Pz))  # Kalman gain
        y = residual(z, zp)  #residual

        self.x = self.xp + dot(K, y)
        self.P = self.Pp - dot3(K, Pz, K.T)
Code Example #20
    def update(self, z, R=None, hx_args=()):
        """ Update the CKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args,)

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for k in range(self._num_sigmas):
            self.sigmas_h[k] = self.hx(self.sigmas_f[k], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        #zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean, self.residual_z)
        zp, Pz = ckf_transform(self.sigmas_h, R)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        m = self._num_sigmas  # the literature uses m for the scaling factor
        xf = self.x.flatten()
        zpf = zp.flatten()
        for k in range(m):
            dx = self.sigmas_f[k] - xf
            dz =  self.sigmas_h[k] - zpf
            Pxz += outer(dx, dz)

        Pxz /= m

        self.K = dot(Pxz, inv(Pz))        # Kalman gain
        self.y = self.residual_z(z, zp)   #residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot3(self.K, Pz, self.K.T)
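
Pure-NumPy sketch of the spherical-radial cubature rule that ckf_transform above is assumed to implement: 2n equally weighted points built from the columns of a Cholesky factor. The helper names below are illustrative only.

import numpy as np

def cubature_points(x, P):
    # 2n points at x +/- sqrt(n) * (columns of the Cholesky factor of P)
    n = x.shape[0]
    U = np.linalg.cholesky(P) * np.sqrt(n)
    return np.concatenate([x + U.T, x - U.T])     # shape (2n, n)

def cubature_transform(points, noise_cov):
    # equally weighted mean and covariance of the transformed points
    m = points.shape[0]
    mean = points.mean(axis=0)
    d = points - mean
    cov = d.T @ d / m + noise_cov
    return mean, cov
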
Code Example #21
File: SUKF.py Project: PepSalehi/filterpy
    def update(self, z, residual=np.subtract, UT=None):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        **Parameters**

        z : numpy.array of shape (dim_z)
            measurement vector

        residual : function (z, z2), optional
            Optional function that computes the residual (difference) between
            the two measurement vectors. If you do not provide this, then the
            built in minus operator will be used. You will normally want to use
            the built in unless your residual computation is nonlinear (for
            example, if they are angles)

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work, but if for example you are using angles the default method
            of computing means and residuals will not work, and you will have
            to define how to compute it.
        """

        # rename for readability
        sigmas_f = self.sigmas_f
        sigmas_h = self.sigmas_h
        Wm = self.Wm
        Wc = self.Wc
        if UT is None:
            UT = unscented_transform

        # transform sigma points into measurement space
        sigmas_h2 = self.hx(sigmas_f)

        for i in range(self._num_sigmas):
            sigmas_h[i] = self.hx(sigmas_f[i])

        assert np.allclose(sigmas_h2, sigmas_h)

        # mean and covariance of prediction passed through UT
        zp, Pz = UT(sigmas_h, Wm, Wc, self.R)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            Pxz += Wc[i] * np.outer(sigmas_f[i] - self.xp,
                                    residual(sigmas_h[i], zp))

        K = dot(Pxz, inv(Pz)) # Kalman gain
        y = residual(z, zp)   #residual

        self.x = self.xp + dot(K, y)
        self.P = self.Pp - dot3(K, Pz, K.T)
Code Example #22
File: EKF.py Project: weiweikong/filterpy
    def predict(self, u=0):
        """ Predict next position.

        **Parameters**

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """

        self.predict_x(u)
        self._P = dot3(self._F, self._P, self._F.T) + self._Q
Code Example #23
    def update(self, z, R=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        if np.isscalar(R):
            R = eye(self.dim_z) * R

        N = self.N
        dim_z = len(z)
        sigmas_h = zeros((N, dim_z))

        # transform sigma points into measurement space
        for i in range(N):
            sigmas_h[i] = self.hx(self.sigmas[i])

        z_mean = np.mean(sigmas_h, axis=0)

        P_zz = 0
        for sigma in sigmas_h:
            s = sigma - z_mean
            P_zz += outer(s, s)
        P_zz = P_zz / (N - 1) + R

        P_xz = 0
        for i in range(N):
            P_xz += outer(self.sigmas[i] - self.x, sigmas_h[i] - z_mean)
        P_xz /= N - 1

        K = dot(P_xz, inv(P_zz))

        e_r = multivariate_normal([0] * dim_z, R, N)
        for i in range(N):
            self.sigmas[i] += dot(K, z + e_r[i] - sigmas_h[i])

        self.x = np.mean(self.sigmas, axis=0)
        self.P = self.P - dot3(K, P_zz, K.T)
Code Example #24
File: EKF.py Project: ytaleb17/filterpy
    def predict(self, u=0):
        """ Predict next position.

        **Parameters**

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """

        self.predict_x(u)
        self._P = dot3(self._F, self._P, self._F.T) + self._Q
Code Example #25
    def update(self, z, R=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        if np.isscalar(R):
            R = eye(self.dim_z) * R

        N = self.N
        dim_z = len(z)
        sigmas_h = zeros((N, dim_z))

        # transform sigma points into measurement space
        for i in range(N):
            sigmas_h[i] = self.hx(self.sigmas[i])

        z_mean = np.mean(sigmas_h, axis=0)

        P_zz = 0
        for sigma in sigmas_h:
            s = sigma - z_mean
            P_zz += outer(s, s)
        P_zz = P_zz / (N-1) + R

        P_xz = 0
        for i in range(N):
            P_xz += outer(self.sigmas[i] - self.x, sigmas_h[i] - z_mean)
        P_xz /= N-1

        K = dot(P_xz, inv(P_zz))

        e_r = multivariate_normal([0]*dim_z, R, N)
        for i in range(N):
            self.sigmas[i] += dot(K, z + e_r[i] - sigmas_h[i])

        self.x = np.mean(self.sigmas, axis=0)
        self.P = self.P - dot3(K, P_zz, K.T)
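
Usage sketch for an ensemble filter of this style, assuming a recent filterpy release; fx, hx and all numeric values are invented.

import numpy as np
from filterpy.kalman import EnsembleKalmanFilter

def fx(x, dt):
    # constant-velocity state transition
    return np.array([x[0] + dt * x[1], x[1]])

def hx(x):
    # measure position only
    return np.array([x[0]])

enkf = EnsembleKalmanFilter(x=np.array([0., 1.]), P=np.eye(2) * 10.,
                            dim_z=1, dt=1., N=100, hx=hx, fx=fx)
enkf.R *= 4.
enkf.Q *= 0.01

for z in [1.0, 2.2, 3.1, 4.3]:
    enkf.predict()
    enkf.update(np.asarray([z]))
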
Code Example #26
    def ekffilter(self,accel_array,uwb_dis):
        if self.StatusLast[0]==0:
            
            # compute the position using TOA
            tag_pos=self.LSQ_TOA(uwb_dis)
            self.StatusLast[0,0]=tag_pos[0]
            self.StatusLast[1,0]=tag_pos[1]
            return tag_pos
        tag_pos_imu=self.imupos(accel_array)
        
        Zobs=self.Z_observe(uwb_dis,tag_pos_imu)
        
        dt=symbols('dt')
        thesubs={dt:self.dt_uwb}
        F_nummat = array(self.F.evalf(subs=thesubs)).astype(float)
        '''
        Needs further analysis: should the initial value be reset to zero here,
        or should the estimate from the previous time step be carried over?
        '''
        Xpre=array([[0,0,0,0,0,0]]).T
        #Xpre=self.XLastEsti
        Xpre=dot(F_nummat,Xpre)
        
        thesubs={dt:self.dt_uwb}
        F_nummat = array(self.F.evalf(subs=thesubs)).astype(float)
        Q_nummat = array(self.Q.evalf(subs=thesubs)).astype(float)     
        self.P=dot(F_nummat,self.P).dot(F_nummat.T)+Q_nummat  # update the prior covariance
        H=self.H_jac_cacu(Xpre,tag_pos_imu)  # compute H
        S = dot3(H, self.P, H.T) + self.R
        K = dot3(self.P, H.T, S.I)  # compute K
        
        Zpre=dot(H,Xpre)
        Xest=Xpre+Matrix(K)*(Zobs-Zpre)
        self.StatusLast=self.StatusLast+Xest[:4,0]
        tag_pos=[0,0]
        tag_pos[0]=self.StatusLast[0,0]
        tag_pos[1]=self.StatusLast[1,0]
        I_KH=np.eye(6)-dot(K,H)
        self.P=dot(I_KH,self.P)
#        self.XLastEsti=np.array(Xest).astype(np.float64)
        return tag_pos
Code Example #27
File: EKF.py Project: ytaleb17/filterpy
    def update(self, z, HJacobian, Hx, R=None):
        """ Performs the update innovation of the extended Kalman filter.

        **Parameters**

        z : np.array
            measurement for this step.
            If `None`, only the predict step is performed.

        HJacobian : function
           function which computes the Jacobian of the H matrix (measurement
           function). Takes state variable (self.x) as input, returns H.


        Hx : function
            function which takes a state variable and returns the measurement
            that would correspond to that state.
        """

        P = self._P
        if R is None:
            R = self._R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        x = self._x

        H = HJacobian(x)

        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv(S))

        y = z - Hx(x)
        self._x = x + dot(K, y)

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
Code Example #28
File: EKF.py Project: PepSalehi/filterpy
    def update(self, z, HJacobian, Hx, R=None):
        """ Performs the update innovation of the extended Kalman filter.

        **Parameters**

        z : np.array
            measurement for this step.
            If `None`, only the predict step is performed.

        HJacobian : function
           function which computes the Jacobian of the H matrix (measurement
           function). Takes state variable (self.x) as input, returns H.


        Hx : function
            function which takes a state variable and returns the measurement
            that would correspond to that state.
        """

        P = self._P
        if R is None:
            R = self._R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        x = self._x

        H = HJacobian(x)

        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv (S))

        y = z - Hx(x)
        self._x = x + dot(K, y)

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
Code Example #29
    def rts_smoother(self, Xs, Ps, Qs=None, dt=None):
        assert len(Xs) == len(Ps)
        n, dim_x = Xs.shape

        if dt is None:
            dt = [self._dt] * n
        elif isscalar(dt):
            dt = [dt] * n

        if Qs is None:
            Qs = [self.Q] * n

        # smoother gain
        Ks = zeros((n, dim_x, dim_x))

        num_sigmas = self._num_sigmas

        xs, ps = Xs.copy(), Ps.copy()
        sigmas_f = zeros((num_sigmas, dim_x))

        for k in range(n - 2, -1, -1):
            # create sigma points from state estimate, pass through state func
            sigmas = self.points_fn.sigma_points(xs[k], ps[k])
            for i in range(num_sigmas):
                sigmas_f[i] = self.fx(sigmas[i], dt[k])

            # compute backwards prior state and covariance
            xb = dot(self.Wm, sigmas_f)
            Pb = 0
            x = Xs[k]
            for i in range(num_sigmas):
                y = self.residual_x(sigmas_f[i], x)
                Pb += self.Wc[i] * outer(y, y)
            Pb += Qs[k]

            # compute cross variance
            Pxb = 0
            for i in range(num_sigmas):
                z = self.residual_x(sigmas[i], Xs[k])
                y = self.residual_x(sigmas_f[i], xb)
                Pxb += self.Wc[i] * outer(z, y)

            # compute gain
            K = dot(Pxb, inv(Pb))

            # update the smoothed estimates
            xs[k] += dot(K, self.residual_x(xs[k + 1], xb))
            ps[k] += dot3(K, ps[k + 1] - Pb, K.T)
            Ks[k] = K

        return (xs, ps, Ks)
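
A sketch of calling this smoother after a batch run, assuming a filterpy-style UnscentedKalmanFilter named ukf has already been built (as in the sigma-point sketch earlier); return arities vary between versions, so results are indexed rather than unpacked.

import numpy as np

zs = [t + np.random.randn() * 0.5 for t in range(30)]

result = ukf.batch_filter(zs)               # filtered means and covariances
xs, Ps = result[0], result[1]

smoothed = ukf.rts_smoother(xs, Ps)         # smoothed means, covariances, gains
xs_smooth, Ps_smooth, Ks = smoothed[0], smoothed[1], smoothed[2]
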
Code Example #30
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
    """ Predict next position using the Kalman filter state propagation
    equations.

    Parameters
    ----------

    x : numpy.array
        State estimate vector

    P : numpy.array
        Covariance matrix

    F : numpy.array()
        State Transition matrix

    Q : numpy.array
        Process noise matrix


    u : numpy.array, default 0.
        Control vector. If non-zero, it is multiplied by B
        to create the control input into the system.

    B : numpy.array, default 1.
        Optional control transition matrix.

    alpha : float, default=1.0
        Fading memory setting. 1.0 gives the normal Kalman filter, and
        values slightly larger than 1.0 (such as 1.02) give a fading
        memory effect - previous measurements have less influence on the
        filter's estimates. This formulation of the Fading memory filter
        (there are many) is due to Dan Simon

    Returns
    -------

    x : numpy.array
        Prior state estimate vector

    P : numpy.array
        Prior covariance matrix
    """

    #if np.isscalar(F):
    F = np.array(F)
    print("PREDICT:", F, x, B, u)
    x = dot(F, x) + dot(B, u)
    P = (alpha * alpha) * dot3(F, P, F.T) + Q

    return x, P
Code Example #31
File: EKF.py Project: Filos0f/SIS-assignment
    def update(self,
               z,
               HJacobian,
               Hx,
               R=None,
               args=(),
               hx_args=(),
               residual=np.subtract):
        if not isinstance(args, tuple):
            args = (args, )

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        P = self._P
        if R is None:
            R = self._R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        x = self._x

        H = HJacobian(x, *args)

        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv(S))
        self._K = K

        hx = Hx(x, *hx_args)
        y = residual(z, hx)
        self._y = y
        self._x = x + dot(K, y)

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
Code Example #32
    def predict(self, u=0):
        """ Predict next position.

        Parameters
        ----------

        u : ndarray
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """

        # x = Fx + Bu

        A = dot3(self._F_inv.T, self.P_inv, self._F_inv)
        try:
            AI = inv(A)
            invertable = True
            if self._no_information:
                try:
                    self.x = dot(inv(self.P_inv), self.x)
                except:
                    self.x = dot(0, self.x)
                self._no_information = False
        except:
            invertable = False
            self._no_information = True

        if invertable:
            self.x = dot(self._F, self.x) + dot(self.B, u)
            self.P_inv = inv(AI + self.Q)
        else:
            I_PF = self._I - dot(self.P_inv, self._F_inv)
            FTI = inv(self._F.T)
            FTIX = dot(FTI, self.x)
            print('Q=', self.Q)
            print('A=', A)
            AQI = inv(A + self.Q)
            self.x = dot(FTI, dot3(I_PF, AQI, FTIX))
Code Example #33
File: information_filter.py Project: CeasarSS/books
    def predict(self, u=0):
        """ Predict next position.

        Parameters
        ----------

        u : ndarray
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """

        # x = Fx + Bu

        A = dot3(self._F_inv.T, self._P_inv, self._F_inv)
        try:
            AI = inv(A)
            invertable = True
            if self._no_information:
                try:
                    self._x = dot(inv(self._P_inv), self._x)
                except:
                    self._x = dot(0, self._x)
                self._no_information = False
        except:
            invertable = False
            self._no_information  = True

        if invertable:
            self._x = dot(self._F, self.x) + dot(self._B, u)
            self._P_inv = inv(AI + self._Q)
        else:
            I_PF = self._I - dot(self._P_inv,self._F_inv)
            FTI = inv(self._F.T)
            FTIX = dot(FTI, self._x)
            print('Q=', self._Q)
            print('A=', A)
            AQI = inv(A + self._Q)
            self._x = dot(FTI, dot3(I_PF, AQI, FTIX))
Code Example #34
File: kalman_filter.py Project: EJHortala/books-2
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
    """ Predict next position using the Kalman filter state propagation
    equations.

    Parameters
    ----------

    x : numpy.array
        State estimate vector

    P : numpy.array
        Covariance matrix

    F : numpy.array()
        State Transition matrix

    Q : numpy.array
        Process noise matrix


    u : numpy.array, default 0.
        Control vector. If non-zero, it is multiplied by B
        to create the control input into the system.

    B : numpy.array, default 1.
        Optional control transition matrix.

    alpha : float, default=1.0
        Fading memory setting. 1.0 gives the normal Kalman filter, and
        values slightly larger than 1.0 (such as 1.02) give a fading
        memory effect - previous measurements have less influence on the
        filter's estimates. This formulation of the Fading memory filter
        (there are many) is due to Dan Simon

    Returns
    -------

    x : numpy.array
        Prior state estimate vector

    P : numpy.array
        Prior covariance matrix
    """

    if np.isscalar(F):
        F = np.array(F)
    x = dot(F, x) + dot(B, u)
    P = (alpha * alpha) * dot3(F, P, F.T) + Q

    return x, P
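
Hypothetical call of the standalone predict() above for a constant-velocity model (invented values); it assumes the module's own imports (dot, dot3) are in scope, as in the snippet.

import numpy as np

x = np.array([0., 1.])
P = np.eye(2) * 10.
F = np.array([[1., 1.], [0., 1.]])
Q = np.eye(2) * 0.01

x, P = predict(x, P, F=F, Q=Q)              # one prediction step, alpha=1 (no fading)
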
Code Example #35
File: kalman_filter.py Project: PepSalehi/filterpy
    def predict(self, u=0):
        """ Predict next position.

        **Parameters**

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """

        # x = Fx + Bu
        self._x = dot(self._F, self.x) + dot(self._B, u)

        # P = FPF' + Q
        self._P = self._alpha_sq * dot3(self._F, self._P, self._F.T) + self._Q
Code Example #36
File: kalman_filter.py Project: ytaleb17/filterpy
    def predict(self, u=0):
        """ Predict next position.

        **Parameters**

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """

        # x = Fx + Bu
        self._x = dot(self._F, self.x) + dot(self._B, u)

        # P = FPF' + Q
        self._P = self._alpha_sq * dot3(self._F, self._P, self._F.T) + self._Q
Code Example #37
File: ekf.py Project: ytaleb17/crazyflie-tools
    def update(self, z, R=None):
        """ Performs the update innovation of the extended Kalman filter. """

        P = self._P
        if R is None:
            R = self._R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        x = self._x

        [h,H] = self.hx(x)

        S = dot3(H, P, H.T) + R
        K = dot3(P, H.T, linalg.inv(S))

        y = z.reshape(self.dim_z,1) - h
        self._x = x + dot(K, y)

        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)
Code Example #38
    def update(self, z, R2=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R2 : np.array, scalar, or None
            Square root of the measurement noise. Optionally provide to
            override the measurement noise for this one call, otherwise
            self.R2 will be used.
        """

        if z is None:
            return

        if R2 is None:
            R2 = self._R1_2
        elif np.isscalar(R2):
            R2 = eye(self.dim_z) * R2

        # rename for convenience
        dim_z = self.dim_z
        M = self._M

        M[0:dim_z, 0:dim_z] = R2.T
        M[dim_z:, 0:dim_z] = dot(self._H, self._P1_2).T
        M[dim_z:, dim_z:] = self._P1_2.T

        _, S = qr(M)
        self._K = S[0:dim_z, dim_z:].T
        N = S[0:dim_z, 0:dim_z].T

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(self._H, self._x)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x += dot3(self._K, pinv(N), self._y)
        self._P1_2 = S[dim_z:, dim_z:].T
Code Example #39
File: square_root.py Project: PacoSpike/filterpy
    def update(self, z, R2=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R2 : np.array, scalar, or None
            Square root of the measurement noise. Optionally provide to
            override the measurement noise for this one call, otherwise
            self.R2 will be used.
        """

        if z is None:
            return

        if R2 is None:
            R2 = self._R1_2
        elif np.isscalar(R2):
            R2 = eye(self.dim_z) * R2

        # rename for convenience
        dim_z = self.dim_z
        M = self._M

        M[0:dim_z, 0:dim_z] = R2.T
        M[dim_z:, 0:dim_z] = dot(self._H, self._P1_2).T
        M[dim_z:, dim_z:] = self._P1_2.T

        _, S = qr(M)
        self._K = S[0:dim_z,  dim_z:].T
        N = S[0:dim_z, 0:dim_z].T

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(self._H, self._x)

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x += dot3(self._K, pinv(N), self._y)
        self._P1_2 = S[dim_z:, dim_z:].T
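
Standalone NumPy check, with made-up values, of the QR-based measurement update used above: the updated square root taken from the QR factor reproduces the conventional posterior covariance.

import numpy as np

dim_x, dim_z = 2, 1
P = np.array([[4., 1.], [1., 2.]])
R = np.array([[0.5]])
H = np.array([[1., 0.]])

P1_2 = np.linalg.cholesky(P)                # lower-triangular square root of P
R1_2 = np.linalg.cholesky(R)

M = np.zeros((dim_x + dim_z, dim_x + dim_z))
M[0:dim_z, 0:dim_z] = R1_2.T
M[dim_z:, 0:dim_z] = (H @ P1_2).T
M[dim_z:, dim_z:] = P1_2.T
_, S = np.linalg.qr(M)

P1_2_new = S[dim_z:, dim_z:].T              # updated square root of P

# conventional covariance update for comparison
S_y = H @ P @ H.T + R
K = P @ H.T @ np.linalg.inv(S_y)
P_new = (np.eye(dim_x) - K @ H) @ P

assert np.allclose(P1_2_new @ P1_2_new.T, P_new)
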
Code Example #40
File: kalman_filter.py Project: ytaleb17/filterpy
    def get_prediction(self, u=0):
        """ Predicts the next state of the filter and returns it. Does not
        alter the state of the filter.

        **Parameters**

        u : np.array
            optional control input

        **Returns**

        (x, P)
            State vector and covariance array of the prediction.
        """

        x = dot(self._F, self._x) + dot(self._B, u)
        P = self._alpha_sq * dot3(self._F, self._P, self._F.T) + self._Q
        return (x, P)
Code Example #41
File: kalman_filter.py Project: bmswgnp/filterpy
    def get_prediction(self, u=0):
        """ Predicts the next state of the filter and returns it. Does not
        alter the state of the filter.

        **Parameters**

        u : np.array
            optional control input

        **Returns**

        (x, P)
            State vector and covariance array of the prediction.
        """

        x = dot(self._F, self._x) + dot(self._B, u)
        P = self._alpha_sq * dot3(self._F, self._P, self._F.T) + self._Q
        return (x, P)
Code Example #42
    def update_correlated(self, z, R=None, H=None):

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        x = self.x
        P = self.P
        M = self.M

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim==1 and shape(z) == (1,1):
            z = z[0]

        if shape(z) == (): # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, x)

        # project system uncertainty into measurement space
        self.S = dot3(H, P, H.T) + dot(H, M) + dot(M.T, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(dot(P, H.T) + M, linalg.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = x + dot(self.K, self.y)
        self.P = P - dot(self.K, dot(H, P) + M.T)

        # compute log likelihood
        self.log_likelihood = logpdf(z, dot(H, x), self.S)
Code Example #43
File: ukf.py Project: blandry/crazyflie-tools
    def update(self, z, R=None, residual=np.subtract, UT=None):

        if isscalar(z):
            dim_z = 1
        else:
            dim_z = len(z)

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self._dim_z) * R

        # rename for readability
        sigmas_f = self.sigmas_f
        sigmas_h = zeros((self._num_sigmas, dim_z))

        if UT is None:
            UT = unscented_transform

        # transform sigma points into measurement space
        for i in range(self._num_sigmas):
            [sigmas_h[i], H] = self.hx(sigmas_f[i])

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(sigmas_h, self.W, self.W, R)

        # compute cross variance of the state and the measurements
        '''self.Pxz = zeros((self._dim_x, dim_z))
        for i in range(self._num_sigmas):
            self.Pxz += self.W[i] * np.outer(sigmas_f[i] - self.x,
                                        residual(sigmas_h[i], zp))'''

        # this is the unreadable but fast implementation of the
        # commented out loop above
        yh = sigmas_f - self.x[np.newaxis, :]
        yz = residual(sigmas_h, zp[np.newaxis, :])
        self.Pxz = yh.T.dot(np.diag(self.W)).dot(yz)

        K = dot(self.Pxz, inv(Pz)) # Kalman gain
        y = residual(z, zp)

        self.x = self.x + dot(K, y)
        self.P = self.P - dot3(K, Pz, K.T)
Code Example #44
File: kalman_filter.py Project: amicol/filterpy
    def predict(self, u=0, Q=None):
        """ Predict next position.

        **Parameters**

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.
        """
        if Q is None:
            Q = self._Q
        elif isscalar(Q):
            Q = eye(self.dim_x) * Q
            
        # x = Fx + Bu
        self._x = dot(self._F, self.x) + dot(self._B, u)

        # P = FPF' + Q
        self._P = self._alpha_sq * dot3(self._F, self._P, self._F.T) + Q
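
Pure-NumPy sketch of the fading-memory prediction used above (invented values): an alpha slightly above 1 inflates the prior covariance so that older measurements are discounted.

import numpy as np

x = np.array([0., 1.])
P = np.eye(2)
F = np.array([[1., 1.], [0., 1.]])
Q = np.eye(2) * 0.01
alpha = 1.02                                # alpha**2 plays the role of _alpha_sq

x = F @ x
P = (alpha ** 2) * (F @ P @ F.T) + Q
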
Code Example #45
File: ukf.py Project: ytaleb17/crazyflie-tools
    def update(self, z, R=None, residual=np.subtract, UT=None):

        if isscalar(z):
            dim_z = 1
        else:
            dim_z = len(z)

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self._dim_z) * R

        # rename for readability
        sigmas_f = self.sigmas_f
        sigmas_h = zeros((self._num_sigmas, dim_z))

        if UT is None:
            UT = unscented_transform

        # transform sigma points into measurement space
        for i in range(self._num_sigmas):
            [sigmas_h[i], H] = self.hx(sigmas_f[i])

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(sigmas_h, self.W, self.W, R)

        # compute cross variance of the state and the measurements
        '''self.Pxz = zeros((self._dim_x, dim_z))
        for i in range(self._num_sigmas):
            self.Pxz += self.W[i] * np.outer(sigmas_f[i] - self.x,
                                        residual(sigmas_h[i], zp))'''

        # this is the unreadable but fast implementation of the
        # commented out loop above
        yh = sigmas_f - self.x[np.newaxis, :]
        yz = residual(sigmas_h, zp[np.newaxis, :])
        self.Pxz = yh.T.dot(np.diag(self.W)).dot(yz)

        K = dot(self.Pxz, inv(Pz))  # Kalman gain
        y = residual(z, zp)

        self.x = self.x + dot(K, y)
        self.P = self.P - dot3(K, Pz, K.T)
Code Example #46
    def predict(self, u=0, B=None, F=None, Q=None):

        if B is None:
            B = self.B
        if F is None:
            F = self.F
        if Q is None:
            Q = self.Q
        elif isscalar(Q):
            Q = eye(self.dim_x) * Q

        # x = Fx + Bu
        self.x = dot(F, self.x) + dot(B, u)

        # P = FPF' + Q
        self.P = self._alpha_sq * dot3(F, self.P, F.T) + Q

        self.x_pred = self.x[:]
        self.P_pred = self.P[:]
Code Example #47
    def get_prediction(self, u=0):
        """ Predicts the next state of the filter and returns it. Does not
        alter the state of the filter.

        Parameters
        ----------

        u : np.array
            optional control input

        Returns
        -------

        (x, P) : tuple
            State vector and covariance array of the prediction.
        """

        x = dot(self.F, self.x) + dot(self.B, u)
        P = self._alpha_sq * dot3(self.F, self.P, self.F.T) + self.Q
        return (x, P)
Code Example #48
    def predict(self, u=0, B=None, F=None, Q=None):
        """ Predict next position using the Kalman filter state propagation
        equations.

        Parameters
        ----------

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.

        B : np.array(dim_x, dim_u), or None
            Optional control transition matrix; a value of None in
            any position will cause the filter to use `self.B`.

        F : np.array(dim_x, dim_x), or None
            Optional state transition matrix; a value of None in
            any position will cause the filter to use `self.F`.

        Q : np.array(dim_x, dim_x), scalar, or None
            Optional process noise matrix; a value of None in
            any position will cause the filter to use `self.Q`.
        """

        if B is None:
            B = self.B
        if F is None:
            F = self.F
        if Q is None:
            Q = self.Q
        elif isscalar(Q):
            Q = eye(self.dim_x) * Q

        # x = Fx + Bu
        self.x = dot(F, self.x) + dot(B, u)

        # P = FPF' + Q
        self.P = self._alpha_sq * dot3(F, self.P, F.T) + Q

        self.x_pred = self.x[:]
        self.P_pred = self.P[:]
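The per-call overrides are what let a single filter handle a time-varying model: rebuild F for each time step and pass it in, optionally with a scalar Q. A self-contained sketch of that pattern; predict_once is a hypothetical stand-in for the method above, with u = 0 and invented numbers:

import numpy as np
from numpy import eye, isscalar, dot

def predict_once(x, P, F, Q, dim_x, alpha_sq=1.0):
    # mirrors the method above: a scalar Q is expanded to eye(dim_x) * Q
    if isscalar(Q):
        Q = eye(dim_x) * Q
    x = dot(F, x)                            # u = 0, so the Bu term drops out
    P = alpha_sq * dot(dot(F, P), F.T) + Q
    return x, P

x = np.array([0.0, 1.0])
P = np.eye(2)
for dt in (0.1, 0.1, 0.5):                   # unevenly spaced samples -> fresh F per call
    F = np.array([[1.0, dt], [0.0, 1.0]])
    x, P = predict_once(x, P, F, Q=0.02, dim_x=2)
print(x)
print(P)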
コード例 #49
0
    def get_prediction(self, u=0):
        """ Predicts the next state of the filter and returns it. Does not
        alter the state of the filter.

        Parameters
        ----------

        u : np.array
            optional control input

        Returns
        -------

        (x, P)
            State vector and covariance array of the prediction.
        """
        raise "Not implemented yet"

        x = dot(self._F, self.x) + dot(self.B, u)
        P = dot3(self._F, self._P, self._F.T) + self.Q
        return (x, P)
コード例 #50
0
ファイル: information_filter.py プロジェクト: CeasarSS/books
    def get_prediction(self, u=0):
        """ Predicts the next state of the filter and returns it. Does not
        alter the state of the filter.

        Parameters
        ----------

        u : np.array
            optional control input

        Returns
        -------

        (x, P)
            State vector and covariance array of the prediction.
        """
        raise "Not implemented yet"

        x = dot(self._F, self._x) + dot(self._B, u)
        P = dot3(self._F, self._P, self._F.T) + self.Q
        return (x, P)
コード例 #51
0
    def update(self, z, R=None, residual=np.subtract):
        if isscalar(z):
            dim_z = 1
        else:
            dim_z = len(z)

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self._dim_z) * R

        cubatures_f = self.cubatures_f
        cubatures_h = zeros((self._num_cubatures, dim_z))

        CT = cubature_transform

        # transform cubature points to measurement space
        for i in range(self._num_cubatures):
            cubatures_h[i] = self.hx(cubatures_f[i])


        # debug: print the measurement-space cubature points
        # print(cubatures_h)

        # mean and covariance of prediction
        zp, Pz = CT(cubatures_h, self.W, self.W, R)

        # cross variance of the state and the measurements
        self.Pxz = zeros((self._dim_x, dim_z))

        for i in range(self._num_cubatures):
            self.Pxz += self.W[i] * np.outer(cubatures_f[i] - self.x,
                                             residual(cubatures_h[i], zp))

        self.K = dot(self.Pxz, inv(Pz))
        y = residual(z, zp)

        self.x = self.x + dot(self.K, y)
        self.P = self.P - dot3(self.K, Pz, self.K.T)
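The last four lines are the generic sigma/cubature-point measurement update: gain from the cross covariance, state corrected by the residual, covariance reduced by K Pz K'. A standalone sketch with invented values for the quantities the cubature transform would have produced, using only numpy:

import numpy as np
from numpy.linalg import inv

x = np.array([1.0, 0.5])                    # predicted state
P = np.array([[0.4, 0.1], [0.1, 0.3]])      # predicted covariance
zp = np.array([1.1])                        # predicted measurement
Pz = np.array([[0.5]])                      # innovation covariance (includes R)
Pxz = np.array([[0.2], [0.05]])             # state/measurement cross covariance
z = np.array([1.3])                         # actual measurement

K = Pxz @ inv(Pz)                           # Kalman gain
y = z - zp                                  # residual
x = x + K @ y
P = P - K @ Pz @ K.T
print(x)
print(P)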
コード例 #52
0
ファイル: kalman_filter.py プロジェクト: CeasarSS/books
    def predict(self, u=0, B=None, F=None, Q=None):
        """ Predict next position using the Kalman filter state propagation
        equations.

        Parameters
        ----------

        u : np.array
            Optional control vector. If non-zero, it is multiplied by B
            to create the control input into the system.

        B : np.array(dim_x, dim_u), or None
            Optional control transition matrix; a value of None in
            any position will cause the filter to use `self.B`.

        F : np.array(dim_x, dim_x), or None
            Optional state transition matrix; a value of None in
            any position will cause the filter to use `self.F`.

        Q : np.array(dim_x, dim_x), scalar, or None
            Optional process noise matrix; a value of None in
            any position will cause the filter to use `self.Q`.
        """

        if B is None:
            B = self._B
        if F is None:
            F = self._F
        if Q is None:
            Q = self._Q
        elif isscalar(Q):
            Q = eye(self.dim_x) * Q

        # x = Fx + Bu
        self._x = dot(F, self.x) + dot(B, u)

        # P = FPF' + Q
        self._P = self._alpha_sq * dot3(F, self._P, F.T) + Q
コード例 #53
0
ファイル: ckf.py プロジェクト: LEEluoman/MyCodes
    def update(self, z, R=None, residual=np.subtract):
        if isscalar(z):
            dim_z = 1
        else:
            dim_z = len(z)
        
        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self._dim_z) * R
        
        cubatures_f = self.cubatures_f
        cubatures_h = zeros((self._num_cubatures,dim_z))
        
        CT = cubature_transform
        
        # transform cubature points to measurement space
        for i in range(self._num_cubatures):
            cubatures_h[i] = self.hx(cubatures_f[i])
        # debug: print the measurement-space cubature points
        # print(cubatures_h)
        
        # mean and covariance of prediction
        zp, Pz = CT(cubatures_h, self.W, self.W, R)
        
        # cross variance of the state and the measurements
        self.Pxz = zeros((self._dim_x,dim_z))
        
        for i in range(self._num_cubatures):
            self.Pxz += self.W[i]*np.outer(cubatures_f[i]-self.x,residual(cubatures_h[i],zp))
        
        self.K = dot(self.Pxz, inv(Pz))
        y = residual(z, zp)
        
        self.x = self.x + dot(self.K, y)
        self.P = self.P - dot3(self.K, Pz, self.K.T)
コード例 #54
0
ファイル: UKF.py プロジェクト: thearcaetect/filterpy
    def rts_smoother(self, Xs, Ps, Qs=None, dt=None):
        """ Runs the Rauch-Tung-Striebal Kalman smoother on a set of
        means and covariances computed by the UKF. The usual input
        would come from the output of `batch_filter()`.

        Parameters
        ----------

        Xs : numpy.array
           array of the means (state variable x) of the output of a Kalman
           filter.

        Ps : numpy.array
            array of the covariances of the output of a kalman filter.

        Qs: list-like collection of numpy.array, optional
            Process noise of the Kalman filter at each time step. Optional,
            if not provided the filter's self.Q will be used

        dt : optional, float or array-like of float
            If provided, specifies the time step of each step of the filter.
            If float, then the same time step is used for all steps. If
            an array, then each element k contains the time step used at
            step k. Units are seconds.

        Returns
        -------

        x : numpy.ndarray
           smoothed means

        P : numpy.ndarray
           smoothed state covariances

        K : numpy.ndarray
            smoother gain at each step

        Examples
        --------

        .. code-block:: Python

            zs = [t + random.randn()*4 for t in range (40)]

            (mu, cov, _, _) = kalman.batch_filter(zs)
            (x, P, K) = kalman.rts_smoother(mu, cov)
        """

        assert len(Xs) == len(Ps)
        n, dim_x = Xs.shape

        if dt is None:
            dt = [self._dt] * n
        elif isscalar(dt):
            dt = [dt] * n

        if Qs is None:
            Qs = [self.Q] * n

        # smoother gain
        Ks = zeros((n,dim_x,dim_x))

        num_sigmas = 2*dim_x + 1

        xs, ps = Xs.copy(), Ps.copy()
        sigmas_f = zeros((num_sigmas, dim_x))

        for k in range(n-2,-1,-1):
            # create sigma points from state estimate, pass through state func
            sigmas = self.points_fn.sigma_points(xs[k], ps[k])
            for i in range(num_sigmas):
                sigmas_f[i] = self.fx(sigmas[i], dt[k])

            # compute backwards prior state and covariance
            xb = dot(self.Wm, sigmas_f)
            Pb = 0
            x = Xs[k]
            for i in range(num_sigmas):
                y = sigmas_f[i] - x
                Pb += self.Wm[i] * outer(y, y)
            Pb += Qs[k]

            # compute cross variance
            Pxb = 0
            for i in range(num_sigmas):
                z = sigmas[i] - Xs[k]
                y = sigmas_f[i] - xb
                Pxb += self.Wm[i] * outer(z, y)

            # compute gain
            K = dot(Pxb, inv(Pb))

            # update the smoothed estimates
            xs[k] += dot (K, xs[k+1] - xb)
            ps[k] += dot3(K, ps[k+1] - Pb, K.T)
            Ks[k] = K

        return (xs, ps, Ks)
コード例 #55
0
ファイル: UKF.py プロジェクト: PepSalehi/filterpy
    def update(self, z, R=None, residual=np.subtract, UT=None):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        **Parameters**

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        residual : function (z, z2), optional
            Optional function that computes the residual (difference) between
            the two measurement vectors. If you do not provide this, then the
            built in minus operator will be used. You will normally want the
            built in version unless your residual computation is nonlinear
            (for example, when the measurements are angles).

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work, but if for example you are using angles the default method
            of computing means and residuals will not work, and you will have
            to define how to compute it.
        """

        if isscalar(z):
            dim_z = 1
        else:
            dim_z = len(z)

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self._dim_z) * R

        # rename for readability
        sigmas_f = self.sigmas_f
        sigmas_h = zeros((self._num_sigmas, dim_z))

        if UT is None:
            UT = unscented_transform

        # transform sigma points into measurement space
        for i in range(self._num_sigmas):
            sigmas_h[i] = self.hx(sigmas_f[i])

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(sigmas_h, self.W, self.W, R)

        # compute cross variance of the state and the measurements
        '''self.Pxz = zeros((self._dim_x, dim_z))
        for i in range(self._num_sigmas):
            self.Pxz += self.W[i] * np.outer(sigmas_f[i] - self.x,
                                        residual(sigmas_h[i], zp))'''

        # this is the unreadable but fast implementation of the
        # commented out loop above
        yh = sigmas_f - self.x[np.newaxis, :]
        yz = residual(sigmas_h, zp[np.newaxis, :])
        self.Pxz = yh.T.dot(np.diag(self.W)).dot(yz)

        K = dot(self.Pxz, inv(Pz)) # Kalman gain
        y = residual(z, zp)

        self.x = self.x + dot(K, y)
        self.P = self.P - dot3(K, Pz, K.T)
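The docstring's point about angles is worth making concrete: with plain subtraction, a measurement near +pi and a prediction near -pi produce a residual of roughly 2*pi instead of the small true error, so an angle-aware `residual` (and a matching UT for the sigma-point means) is needed. A minimal residual function one might pass in; this is an illustration, not part of the library:

import numpy as np

def angle_residual(a, b):
    """Element-wise difference of two angle vectors, wrapped to [-pi, pi)."""
    d = np.subtract(a, b)
    return (d + np.pi) % (2.0 * np.pi) - np.pi

# residual between a measurement near +pi and a prediction near -pi
print(angle_residual(np.array([3.1]), np.array([-3.1])))   # ~ -0.083, not 6.2

It could then be supplied as the `residual` argument of the update shown above.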
コード例 #56
0
    def smooth_batch(self, zs, N, us=None):
        """ batch smooths the set of measurements using a fixed lag smoother.
        I consider this function a somewhat pedalogical exercise; why would
        you not use a RTS smoother if you are able to batch process your data?
        Hint: RTS is a much better smoother, and faster besides. Use it.

        This is a batch processor, so it does not alter any of the object's
        data. In particular, self.x is NOT modified. All date is returned
        by the function.

        **Parameters**


        zs : ndarray of measurements

            iterable list (usually ndarray, but whatever works for you) of
            measurements that you want to smooth, one per time step.

        N : int
           size of fixed lag in time steps

        us : ndarray, optional

            If provided, control input to the filter for each time step


        **Returns**

        (xhat_smooth, xhat) : ndarray, ndarray

            xhat_smooth is the output of the N step fixed lag smoother
            xhat is the filter output of the standard Kalman filter
        """


        # take advantage of the fact that np.array are assigned by reference.
        H = self.H
        R = self.R
        F = self.F
        P = self.P
        x = self.x
        Q = self.Q
        B = self.B

        if x.ndim == 1:
            xSmooth = zeros((len(zs), self.dim_x))
            xhat    = zeros((len(zs), self.dim_x))
        else:
            xSmooth = zeros((len(zs), self.dim_x, 1))
            xhat    = zeros((len(zs), self.dim_x, 1))
        for k, z in enumerate(zs):

            # predict step of normal Kalman filter
            x_pre = dot(F, x)
            if us is not None:
                x_pre += dot(B,us[k])

            P = dot3(F, P, F.T) + Q

            # update step of normal Kalman filter
            y = z - dot(H, x_pre)

            S = dot3(H, P, H.T) + R
            SI = inv(S)

            K = dot3(P, H.T, SI)

            x = x_pre + dot(K, y)

            I_KH = self._I - dot(K, H)
            P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

            xhat[k]    = x.copy()
            xSmooth[k] = x_pre.copy()

            #compute invariants
            HTSI = dot(H.T, SI)
            F_LH = (F - dot(K,H)).T

            if k >= N:
                PS = P.copy() # smoothed P for step i
                for i in range (N):
                    K = dot(PS, HTSI)  # smoothed gain
                    PS = dot(PS, F_LH) # smoothed covariance

                    si = k-i
                    xSmooth[si] = xSmooth[si] + dot(K, y)
            else:
                # Some sources specify starting the fixed lag smoother only
                # after N steps have passed, some don't. I am getting far
                # better results by starting only at step N.
                xSmooth[k] = xhat[k]

        return xSmooth, xhat
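A hedged usage sketch of the batch call, relying only on the signature shown above; `fls` is assumed to be an already-configured instance of the fixed-lag smoother class (F, H, Q, R, x, P set up for a matching model), so this is not runnable on its own:

import numpy as np

zs = [t + np.random.randn() * 4 for t in range(40)]    # noisy ramp, one scalar per step
xhat_smooth, xhat = fls.smooth_batch(zs, N=4)           # fls: assumed pre-configured smoother

# xhat is the ordinary Kalman filter output; xhat_smooth is the N-step fixed
# lag output, which trades N steps of delay for a noticeably smoother estimate.
print(xhat.shape, xhat_smooth.shape)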
コード例 #57
0
    def smooth(self, z, u=None):
        """ Smooths the measurement using a fixed lag smoother.

        On return, self.xSmooth is populated with the N previous smoothed
        estimates,  where self.xSmooth[k] is the kth time step. self.x
        merely contains the current Kalman filter output of the most recent
        measurement, and is not smoothed at all (beyond the normal Kalman
        filter processing).

        self.xSmooth grows in length on each call. If you run this 1 million
        times, it will contain 1 million elements. Sure, we could minimize
        this, but then this would make the caller's code much more cumbersome.

        This also means that you cannot use this filter to track more than
        one data set; as data will be hopelessly intermingled. If you want
        to filter something else, create a new FixedLagSmoother object.

        **Parameters**

        z : ndarray or scalar
            measurement to be smoothed


        u : ndarray, optional
            If provided, control input to the filter
        """

        # take advantage of the fact that np.array are assigned by reference.
        H = self.H
        R = self.R
        F = self.F
        P = self.P
        x = self.x
        Q = self.Q
        B = self.B
        N = self.N

        k = self.count

        # predict step of normal Kalman filter
        x_pre = dot(F, x)
        if u is not None:
            x_pre += dot(B,u)

        P = dot3(F, P, F.T) + Q

        # update step of normal Kalman filter
        y = z - dot(H, x_pre)

        S = dot3(H, P, H.T) + R
        SI = inv(S)

        K = dot3(P, H.T, SI)

        x = x_pre + dot(K, y)

        I_KH = self._I - dot(K, H)
        P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

        self.xSmooth.append(x_pre.copy())

        #compute invariants
        HTSI = dot(H.T, SI)
        F_LH = (F - dot(K,H)).T

        if k >= N:
            PS = P.copy() # smoothed P for step i
            for i in range (N):
                K = dot(PS, HTSI)  # smoothed gain
                PS = dot(PS, F_LH) # smoothed covariance

                si = k-i
                self.xSmooth[si] = self.xSmooth[si] + dot(K, y)
        else:
            # Some sources specify starting the fixed lag smoother only
            # after N steps have passed, some don't. I am getting far
            # better results by starting only at step N.
            self.xSmooth[k] = x.copy()

        self.count += 1
        self.x = x
        self.P = P
コード例 #58
0
    def update_correlated(self, z, R=None, H=None):
        """ Add a new measurement (z) to the Kalman filter assuming that
        process noise and measurement noise are correlated as defined in
        the `self.M` matrix.

        If z is None, nothing is changed.

        **Parameters**

        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array,  or None
            Optionally provide H to override the measurement function for this
            one call, otherwise  self.H will be used.

        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        x = self._x
        P = self._P
        M = self.M

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(H, x)

        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + dot(H, M) + dot(M.T, H.T) + R

        mean = np.array(dot(H, x)).flatten()
        flatz = np.array(z).flatten()

        self.likelihood = multivariate_normal.pdf(flatz, mean, cov=S, allow_singular=True)
        self.log_likelihood = multivariate_normal.logpdf(flatz, mean, cov=S, allow_singular=True)

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot(dot(P, H.T) + M, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)
        self._P = P - dot(K, dot(H, P) + M.T)

        self._S = S
        self._K = K
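Compared with the standard update, the only changes introduced by the correlation matrix M are the two extra terms in S and the `+ M` in the gain. A self-contained numeric sketch of just those equations; all values are invented for illustration:

import numpy as np
from numpy.linalg import inv

# hypothetical 2-state / 1-measurement setup
x = np.array([0.0, 1.0])
P = np.array([[0.5, 0.1], [0.1, 0.4]])
H = np.array([[1.0, 0.0]])
R = np.array([[0.2]])
M = np.array([[0.05], [0.0]])    # correlation between process and measurement noise
z = np.array([0.3])

y = z - H @ x
S = H @ P @ H.T + H @ M + M.T @ H.T + R    # innovation covariance picks up the cross terms
K = (P @ H.T + M) @ inv(S)                 # gain also picks up the M term
x = x + K @ y
P = P - K @ (H @ P + M.T)
print(x)
print(P)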