Example #1
    def update(self, z, R=None, hx_args=()):
        """ Update the CKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for k in range(self._num_sigmas):
            self.sigmas_h[k] = self.hx(self.sigmas_f[k], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        #zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean, self.residual_z)
        zp, Pz = ckf_transform(self.sigmas_h, R)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        m = self._num_sigmas  # literature uses m for the scaling factor
        xf = self.x.flatten()
        zpf = zp.flatten()
        for k in range(m):
            dx = self.sigmas_f[k] - xf
            dz = self.sigmas_h[k] - zpf
            Pxz += outer(dx, dz)

        Pxz /= m

        self.K = dot(Pxz, inv(Pz))  # Kalman gain
        self.y = self.residual_z(z, zp)  #residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot(self.K, Pz).dot(self.K.T)

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=Pz)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
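
For context, a minimal loop that drives an update like this one, assuming filterpy's CubatureKalmanFilter constructor takes (dim_x, dim_z, dt, hx, fx); the constant-velocity fx and position-only hx below are illustrative, not from the source above.

import numpy as np
from filterpy.kalman import CubatureKalmanFilter

def fx(x, dt):
    # constant-velocity transition for state [position, velocity]
    return np.array([x[0] + dt * x[1], x[1]])

def hx(x):
    # measure position only
    return np.array([x[0]])

ckf = CubatureKalmanFilter(dim_x=2, dim_z=1, dt=1.0, hx=hx, fx=fx)
ckf.x = np.array([0.0, 1.0])
ckf.P *= 10.0

for z in [1.1, 2.0, 2.9]:
    ckf.predict()
    ckf.update(np.array([z]))   # the method shown above
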
Example #2
 def log_likelihood(self):
     """
     log-likelihood of the last measurement.
     """
     if self._log_likelihood is None:
         self._log_likelihood = logpdf(x=self.y, cov=self.S)
     return self._log_likelihood
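
The pattern above is a lazily computed, cached value: update() resets _log_likelihood to None, and the property computes it once on first access. A self-contained sketch of that pattern (class and attribute names are illustrative):

import numpy as np
from filterpy.stats import logpdf

class LazyLikelihood:
    def __init__(self):
        self.y = np.array([0.1])       # residual from the last update
        self.S = np.array([[1.0]])     # innovation covariance
        self._log_likelihood = None    # reset by each update()

    @property
    def log_likelihood(self):
        # compute on first access, then serve the cached value
        if self._log_likelihood is None:
            self._log_likelihood = logpdf(x=self.y, cov=self.S)
        return self._log_likelihood
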
Example #3
    def log_likelihood(self, z):
        """ log likelihood of the measurement `z`. """

        if z is None:
            return math.log(sys.float_info.min)
        else:
            return logpdf(z, dot(self.H, self.x), self.S)
Example #4
    def update(self, z, R=None, UT=None, hx_args=()):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work - you can use x_mean_fn and z_mean_fn to alter the behavior
            of the unscented transform.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args,)

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for i in range(self._num_sigmas):
            self.sigmas_h[i] = self.hx(self.sigmas_f[i], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean, self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            dx = self.residual_x(self.sigmas_f[i], self.x)
            dz = self.residual_z(self.sigmas_h[i], zp)
            Pxz += self.Wc[i] * outer(dx, dz)

        self.K = dot(Pxz, inv(Pz))        # Kalman gain
        self.y = self.residual_z(z, zp)   # residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot(self.K, Pz).dot(self.K.T)

        self.log_likelihood = logpdf(self.y, np.zeros(len(self.y)), Pz)
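
A minimal sketch of driving this UKF update through filterpy's public API, using MerweScaledSigmaPoints and the UnscentedKalmanFilter constructor; the fx and hx here are illustrative:

import numpy as np
from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints

def fx(x, dt):
    return np.array([x[0] + dt * x[1], x[1]])

def hx(x):
    return x[:1]

points = MerweScaledSigmaPoints(n=2, alpha=0.1, beta=2.0, kappa=0.0)
ukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=1.0,
                            fx=fx, hx=hx, points=points)
ukf.x = np.array([0.0, 1.0])

for z in [1.0, 2.1, 2.9]:
    ukf.predict()
    ukf.update(np.array([z]))
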
Example #6
    def update(self, z, R=None, UT=None, hx_args=()):
        """ Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work - you can use x_mean_fn and z_mean_fn to alter the behavior
            of the unscented transform.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for i in range(self._num_sigmas):
            self.sigmas_h[i] = self.hx(self.sigmas_f[i], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean,
                    self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            dx = self.residual_x(self.sigmas_f[i], self.x)
            dz = self.residual_z(self.sigmas_h[i], zp)
            Pxz += self.Wc[i] * outer(dx, dz)

        self.K = dot(Pxz, inv(Pz))  # Kalman gain
        self.y = self.residual_z(z, zp)  # residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot(self.K, Pz).dot(self.K.T)

        self.log_likelihood = logpdf(self.y, np.zeros(len(self.y)), Pz)
Example #7
    def update(self, z, R_inv=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R_inv : np.array, scalar, or None
            Optionally provide R_inv to override the inverse measurement
            noise for this one call, otherwise self.R_inv will be used.
        """

        if z is None:
            return

        if R_inv is None:
            R_inv = self.R_inv
        elif np.isscalar(R_inv):
            R_inv = eye(self.dim_z) * R_inv

        # rename for readability and a tiny extra bit of speed
        H = self.H
        H_T = H.T
        P_inv = self.P_inv
        x = self.x

        if self._no_information:
            self.x = dot(P_inv, x) + dot(H_T, R_inv).dot(z)
            self.P_inv = P_inv + dot(H_T, R_inv).dot(H)
            self.log_likelihood = math.log(sys.float_info.min)
            self.likelihood = sys.float_info.min

        else:
            # y = z - Hx
            # error (residual) between measurement and prediction
            self.y = z - dot(H, x)

            # in information form the measurement enters additively:
            # S = P_inv + H' R_inv H
            self.S = P_inv + dot(H_T, R_inv).dot(H)
            self.K = dot(self.inv(self.S), H_T).dot(R_inv)

            # x = x + Ky
            # predict new x with residual scaled by the kalman gain
            self.x = x + dot(self.K, self.y)
            self.P_inv = P_inv + dot(H_T, R_inv).dot(H)

            self.z = np.copy(reshape_z(z, self.dim_z, np.ndim(self.x)))

            if self.compute_log_likelihood:
                self.log_likelihood = logpdf(x=self.y, cov=self.S)
                self.likelihood = math.exp(self.log_likelihood)
                if self.likelihood == 0:
                    self.likelihood = sys.float_info.min
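
The key property of this information-form update is that the measurement enters additively: the posterior information matrix is P_inv + H' R_inv H. A small numpy check (illustrative values, not from the source) that this agrees with the covariance-form update:

import numpy as np

H = np.array([[1.0, 0.0]])                   # measure the first state only
R_inv = np.array([[4.0]])                    # inverse measurement noise (R = 0.25)
P = np.diag([2.0, 3.0])                      # prior covariance
P_inv = np.linalg.inv(P)                     # prior information matrix

# information-form update: add H' R_inv H
P_inv_post = P_inv + H.T @ R_inv @ H

# equivalent covariance-form update for comparison
S = H @ P @ H.T + np.linalg.inv(R_inv)
K = P @ H.T @ np.linalg.inv(S)
P_post = (np.eye(2) - K @ H) @ P

assert np.allclose(np.linalg.inv(P_inv_post), P_post)
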
Example #8
    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for this
            one call, otherwise self.H will be used.
        """

        if z is None:
            return

        z = _reshape_z(z, self.dim_z, self.x.ndim)

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        if H is None:
            H = self.H

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, self.x)

        # common subexpression for speed
        PHT = dot(self.P, H.T)

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot(H, PHT) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(PHT, self.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self._I - dot(self.K, H)
        self.P = dot(dot(I_KH, self.P), I_KH.T) + dot(dot(self.K, R), self.K.T)

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
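
The covariance update here is the Joseph form, (I-KH)P(I-KH)' + KRK', which preserves symmetry and positive definiteness even for a suboptimal gain. A quick numeric check (illustrative values) that it reduces to the shorter (I-KH)P exactly when K is the optimal gain:

import numpy as np

P = np.array([[4.0, 1.0], [1.0, 2.0]])
H = np.array([[1.0, 0.0]])
R = np.array([[0.5]])

S = H @ P @ H.T + R
K = P @ H.T @ np.linalg.inv(S)      # optimal Kalman gain

I_KH = np.eye(2) - K @ H
P_joseph = I_KH @ P @ I_KH.T + K @ R @ K.T
P_short = I_KH @ P

assert np.allclose(P_joseph, P_short)   # equal only for the optimal K
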
Example #9
def test_logpdf():
    assert 3.9 < exp(logpdf(1, 1, .01)) < 4.
    assert 3.9 < exp(logpdf([1], [1], .01)) < 4.
    assert 3.9 < exp(logpdf([[1]], [[1]], .01)) < 4.

    logpdf([1., 2], [1.1, 2], cov=np.array([[1., 2], [2, 5]]), allow_singular=False)
    logpdf([1., 2], [1.1, 2], cov=np.array([[1., 2], [2, 5]]), allow_singular=True)
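
filterpy's logpdf is the log density of a multivariate normal, so it can be cross-checked against scipy; a small sketch using the same values as the test above:

import numpy as np
from scipy.stats import multivariate_normal
from filterpy.stats import logpdf

z = np.array([1.0, 2.0])
mean = np.array([1.1, 2.0])
cov = np.array([[1.0, 2.0], [2.0, 5.0]])

p = logpdf(z, mean, cov)
p_scipy = multivariate_normal.logpdf(z, mean, cov)
assert np.isclose(p, p_scipy)
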
Example #10
def log_likelihood_of(kf, z):
    """
        log likelihood of the measurement `z`. This should only be called
        after a call to update(). Calling after predict() will yield an
        incorrect result."""

    if z is None:
        return math.log(sys.float_info.min)
    return logpdf(z, kf.z, kf.S)
Example #11
def test_logpdf2():
    z = np.array([1., 2.])
    mean = np.array([1.1, 2])
    cov = np.array([[1., 2], [2, 5]])

    p = logpdf(z, mean, cov, allow_singular=False)
    p2 = log_multivariate_normal_density(z, mean, cov)
    print('p', p)
    print('p2', p2)
    print('p-p2', p - p2)
Example #12
    def _smooth_backstep(self, modal_probs_current, modal_probs_smoothed_next,
                         x_smooth_next, P_smooth_next, x_prior_next,
                         P_prior_next, x_post, P_post, likelihoods):

        mixing_probs, _ = self._compute_mixing_probabilities_functional(
            self.M, modal_probs_current)
        mixing_probs, _ = self._compute_mixing_probabilities_functional(
            M=mixing_probs, mu=modal_probs_smoothed_next)

        [xs_smooth_mixed_n, Ps_smooth_mixed_n
         ] = self._compute_mixed_estimates(x_smooth_next, P_smooth_next,
                                           mixing_probs)

        # NOTE: the mixed estimates computed above are immediately overwritten
        # here, so the mixing step is effectively bypassed
        xs_smooth_mixed_n = x_smooth_next
        Ps_smooth_mixed_n = P_smooth_next
        xs_smooth_model = []
        Ps_smooth_model = []
        for i, f in enumerate(self.filters):
            [x_temp,
             P_temp] = f._rts_backstep(x_post[i], P_post[i],
                                       xs_smooth_mixed_n[i],
                                       Ps_smooth_mixed_n[i], x_prior_next[i],
                                       P_prior_next[i])
            xs_smooth_model.append(x_temp)
            Ps_smooth_model.append(P_temp)

        likelihood_smooth = []

        for j, f_j in enumerate(self.filters):
            cumulation = 0.0
            for i, f_i in enumerate(self.filters):
                # Resizing by adding arrays of zeros slows the program down
                # substantially
                z = np.zeros(self.x.shape)
                x_i = self.add(x_smooth_next[i], z)
                x_j = self.add(x_prior_next[i], z)
                P_j = self.add(P_prior_next[i], np.zeros(self.P.shape))
                cumulation = cumulation + self.M[i, j] * logpdf(x_i, x_j, P_j)
            #likelihood_smooth.append(likelihoods[j])
            likelihood_smooth.append(cumulation)

        # NOTE: likelihood_smooth holds sums of *log*-likelihoods; strictly
        # they should be exponentiated before use as probability weights
        mu_smooth = (likelihood_smooth * modal_probs_current)
        mu_smooth = mu_smooth / mu_smooth.sum()

        x_smooth_out = np.zeros(self.x.shape)
        P_smooth_out = np.zeros(self.P.shape)

        for i, (f, mu) in enumerate(zip(self.filters, mu_smooth)):
            x_smooth_out = self.add(x_smooth_out, xs_smooth_model[i] * mu)

        for i, (f, mu) in enumerate(zip(self.filters, mu_smooth)):
            y = self.add(f.x, -x_smooth_out)
            P_smooth_out = self.add(
                P_smooth_out,
                (self.add(mu * outer(y, y), mu * Ps_smooth_model[i])))
        return x_smooth_out, P_smooth_out
Example #13
    def update(self, z, R=None, H=None):

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        P = self.P
        x = self.x

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        Hx = dot(H, x)

        assert shape(Hx) == shape(z) or (shape(Hx) == (1,1) and shape(z) == (1,)), \
               'shape of z should be {}, but it is {}'.format(
               shape(Hx), shape(z))
        self.y = z - Hx

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot3(H, P, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot3(P, H.T, linalg.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self.I - dot(self.K, H)
        self.P = dot3(I_KH, P, I_KH.T) + dot3(self.K, R, self.K.T)

        self.log_likelihood = logpdf(z, dot(H, x), self.S)
Example #14
    def update(self, z, R=None):
        """
        Add a new measurement (z) to the kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------

        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(self.H, self.x)

        PHT = dot(self.P, self.H.T)

        # S = HPH' + R
        # project system uncertainty into measurement space
        self.S = dot(self.H, PHT) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = PHT.dot(linalg.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = self.x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self.I - dot(self.K, self.H)
        self.P = dot(I_KH, self.P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
Example #15
    def smooth(self):
        """
        
        Performs IMM smoothing one step.
        This should be performed after the update() is performed.
        
        
        """
        x_smooths, P_smooths, likelihood_smooth = [], [], []
        mu_smooth = self.mu.copy()

        for i, f in enumerate(self.filters):
            # propagate using the mixed state estimate and covariance
            smoothing_gain = dot(f.P_post.dot(f.F.T), np.linalg.inv(f.P_prior))

            x_smooths.append(f.x_post +
                             smoothing_gain.dot(self.x_mixed[i] - f.x_prior))
            P_smooths.append(f.P_post - smoothing_gain.dot(
                self.P_mixed[i] - f.P_prior).dot(smoothing_gain.T))

        for j, f_j in enumerate(self.filters):
            cumulation = 0.0
            for i, f_i in enumerate(self.filters):
                # Resizing by adding arrays of zeros slows the program down
                # substantially
                z = np.zeros(self.x.shape)
                x_i = self.add(x_smooths[i], z)
                x_j = self.add(f_i.x_prior, z)
                P_j = self.add(f_i.P_prior, np.zeros(self.P.shape))
                cumulation = cumulation + self.M[i, j] * logpdf(x_i, x_j, P_j)
            likelihood_smooth.append(cumulation)
            # NOTE: this clear() discards the smoothed log-likelihoods just
            # computed; the filters' update-step likelihoods appended below
            # are used instead
            likelihood_smooth.clear()

        for j, f_j in enumerate(self.filters):
            likelihood_smooth.append(f_j.likelihood)

        mu_smooth = (likelihood_smooth * mu_smooth)
        mu_smooth = mu_smooth / mu_smooth.sum()

        self.x_smooth.fill(0)
        for i, (f, mu) in enumerate(zip(self.filters, mu_smooth)):
            self.x_smooth = self.add(self.x_smooth, x_smooths[i] * mu)

        self.P_smooth.fill(0)
        for i, (f, mu) in enumerate(zip(self.filters, mu_smooth)):
            y = self.add(f.x, -self.x_smooth)
            self.P_smooth = self.add(
                self.P_smooth, (self.add(mu * outer(y, y), mu * P_smooths[i])))
Example #16
    def update_correlated(self, z, R=None, H=None):

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        x = self.x
        P = self.P
        M = self.M

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        self.y = z - dot(H, x)

        # project system uncertainty into measurement space
        self.S = dot3(H, P, H.T) + dot(H, M) + dot(M.T, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        self.K = dot(dot(P, H.T) + M, linalg.inv(self.S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self.x = x + dot(self.K, self.y)
        self.P = P - dot(self.K, dot(H, P) + M.T)

        # compute log likelihood
        self.log_likelihood = logpdf(z, dot(H, x), self.S)
Example #17
    def update(self, z, R=None, UT=None, hx_args=()):
        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args,)

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for i in range(self._num_sigmas):
            self.sigmas_h[i] = self.hx(self.sigmas_f[i], *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean, self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = zeros((self._dim_x, self._dim_z))
        for i in range(self._num_sigmas):
            dx = self.residual_x(self.sigmas_f[i], self.x)
            dz = self.residual_z(self.sigmas_h[i], zp)
            Pxz += self.Wc[i] * outer(dx, dz)

        self.K = dot(Pxz, inv(Pz))        # Kalman gain
        self.y = self.residual_z(z, zp)   # residual

        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot3(self.K, Pz, self.K.T)

        self.log_likelihood = logpdf(self.y, np.zeros(len(self.y)), Pz)
        # Store transformed state uncertainty but without added sensor noise
        self.Pz = Pz - R
Example #18
    def find_likelihood(self, z, R=None, H=None):
        if np.all(np.isnan(z)):
            x_mean_velocity = np.mean(self.velocity_x[-5:])
            y_mean_velocity = np.mean(self.velocity_y[-5:])

            z_noise = np.random.normal(loc=0, scale=0, size=2)  # scale=0: adds no noise
            z = list(
                self.x[0:2] - [self.velocity_x[-1], self.velocity_y[-1]] + [x_mean_velocity, y_mean_velocity] + z_noise)
        z = reshape_z(z, self.dim_z, self.x.ndim)
        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R
        if H is None:
            H = self.H
        y = z - dot(H, self.x)
        PHT = dot(self.P, H.T)
        S = dot(H, PHT) + R
        _log_likelihood = logpdf(x=y, cov=S)

        _likelihood = exp(_log_likelihood)
        if _likelihood == 0:
            _likelihood = sys.float_info.min
        return _likelihood
Example #19
    def update(self,
               z,
               HJacobian,
               Hx,
               R=None,
               args=(),
               hx_args=(),
               residual=np.subtract):
        """ Performs the update innovation of the extended Kalman filter.

        Parameters
        ----------

        z : np.array
            measurement for this step.
            If `None`, nothing is changed.

        HJacobian : function
           function which computes the Jacobian of the H matrix (measurement
           function). Takes state variable (self.x) as input, returns H.

        Hx : function
            function which takes as input the state variable (self.x) along
            with the optional arguments in hx_args, and returns the measurement
            that would correspond to that state.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        args : tuple, optional, default ()
            arguments to be passed into HJacobian after the required state
            variable. For robot localization you might need to pass in
            information about the map and time of day, so you might have
            `args=(map_data, time)`, where the signature of HJacobian will
            be `def HJacobian(x, map, t)`

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.

        residual : function (z, z2), optional
            Optional function that computes the residual (difference) between
            the two measurement vectors. If you do not provide this, then the
            built in minus operator will be used. You will normally want to use
            the built in unless your residual computation is nonlinear (for
            example, if they are angles)
        """

        if z is None:
            return

        if not isinstance(args, tuple):
            args = (args, )

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if R is None:
            R = self.R
        elif np.isscalar(R):
            R = eye(self.dim_z) * R

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        H = HJacobian(self.x, *args)

        PHT = dot(self.P, H.T)
        self.S = dot(H, PHT) + R
        self.K = PHT.dot(linalg.inv(self.S))

        hx = Hx(self.x, *hx_args)
        self.y = residual(z, hx)
        self.x = self.x + dot(self.K, self.y)

        # P = (I-KH)P(I-KH)' + KRK' is more numerically stable
        # and works for non-optimal K vs the equation
        # P = (I-KH)P usually seen in the literature.
        I_KH = self._I - dot(self.K, H)
        self.P = dot(I_KH, self.P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
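
To make HJacobian and Hx concrete: for a range-only measurement z = sqrt(px^2 + py^2) of a two-state position, the Jacobian is [px/r, py/r]. A hedged sketch, assuming filterpy's ExtendedKalmanFilter(dim_x, dim_z) constructor:

import numpy as np
from filterpy.kalman import ExtendedKalmanFilter

def hx_range(x):
    # measurement: range from the origin to position (x[0], x[1])
    return np.array([np.sqrt(x[0]**2 + x[1]**2)])

def h_jacobian_range(x):
    # Jacobian of the range measurement w.r.t. the state [px, py]
    r = np.sqrt(x[0]**2 + x[1]**2)
    return np.array([[x[0] / r, x[1] / r]])

ekf = ExtendedKalmanFilter(dim_x=2, dim_z=1)
ekf.x = np.array([3.0, 4.0])              # true range is 5
ekf.update(np.array([5.1]), HJacobian=h_jacobian_range, Hx=hx_range)
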
Example #20
    def predict_update(self, z, HJacobian, Hx, args=(), hx_args=(), u=0):
        """ Performs the predict/update innovation of the extended Kalman
        filter.

        Parameters
        ----------

        z : np.array
            measurement for this step.
            If `None`, only the predict step is performed.

        HJacobian : function
           function which computes the Jacobian of the H matrix (measurement
           function). Takes state variable (self.x) as input, along with the
           optional arguments in args, and returns H.

        Hx : function
            function which takes as input the state variable (self.x) along
            with the optional arguments in hx_args, and returns the measurement
            that would correspond to that state.

        args : tuple, optional, default ()
            arguments to be passed into HJacobian after the required state
            variable.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx after the required state
            variable.

        u : np.array or scalar
            optional control vector input to the filter.
        """
        #pylint: disable=too-many-locals

        if not isinstance(args, tuple):
            args = (args, )

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if np.isscalar(z) and self.dim_z == 1:
            z = np.asarray([z], float)

        F = self.F
        B = self.B
        P = self.P
        Q = self.Q
        R = self.R
        x = self.x

        H = HJacobian(x, *args)

        # predict step
        x = dot(F, x) + dot(B, u)
        P = dot(F, P).dot(F.T) + Q

        # save prior (the freshly predicted x and P, not the old self.x/self.P)
        self.x_prior = np.copy(x)
        self.P_prior = np.copy(P)

        # update step
        PHT = dot(P, H.T)
        self.S = dot(H, PHT) + R
        self.K = dot(PHT, linalg.inv(self.S))

        self.y = z - Hx(x, *hx_args)
        self.x = x + dot(self.K, self.y)

        I_KH = self._I - dot(self.K, H)
        self.P = dot(I_KH, P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=self.S)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
Example #21
def update(x, P, z, R, H=None, return_all=False):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.

    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    update(1, 2, 1, 1, 1)      # univariate
    update(x, P, z, R, H)      # multivariate

    Parameters
    ----------

    x : numpy.array(dim_x, 1), or float
        State estimate vector

    P : numpy.array(dim_x, dim_x), or float
        Covariance matrix

    z : numpy.array(dim_z, 1), or float
        measurement for this update.

    R : numpy.array(dim_z, dim_z), or float
        Measurement noise matrix

    H : numpy.array(dim_z, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    return_all : bool, default False
        If true, y, K, S, and log_likelihood are returned, otherwise
        only x and P are returned.

    Returns
    -------

    x : numpy.array
        Posterior state estimate vector

    P : numpy.array
        Posterior covariance matrix

    y : numpy.array or scalar
        Residual. Difference between measurement and state in measurement space

    K : numpy.array
        Kalman gain

    S : numpy.array
        System uncertainty in measurement space

    log_likelihood : float
        log likelihood of the measurement
    """

    if z is None:
        if return_all:
            return x, P, None, None, None, None
        else:
            return x, P

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    if not np.isscalar(x):
        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

    # error (residual) between measurement and prediction
    y = z - dot(H, x)

    # project system uncertainty into measurement space
    S = dot3(H, P, H.T) + R

    # map system uncertainty into kalman gain
    try:
        K = dot3(P, H.T, linalg.inv(S))
    except Exception:  # S is scalar; invert it directly
        K = dot3(P, H.T, 1 / S)

    # predict new x with residual scaled by the kalman gain
    x = x + dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'
    KH = dot(K, H)

    try:
        I_KH = np.eye(KH.shape[0]) - KH
    except Exception:  # KH is scalar (univariate case)
        I_KH = np.array(1 - KH)
    P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

    if return_all:
        # compute log likelihood
        log_likelihood = logpdf(z, dot(H, x), S)
        return x, P, y, K, S, log_likelihood
    else:
        return x, P
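
A usage sketch for this procedural update, assuming the function above is importable together with its dot3/linalg helpers; it accepts plain floats in the univariate case and arrays in the multivariate case:

import numpy as np

# univariate: floats in, posterior mean/variance out
x, P = update(1.0, 2.0, z=1.2, R=1.0, H=1.0)

# multivariate, also returning the residual, gain, S and log likelihood
x0 = np.array([0.0, 1.0])
P0 = np.eye(2)
H = np.array([[1.0, 0.0]])
x, P, y, K, S, ll = update(x0, P0, z=np.array([0.5]),
                           R=np.array([[0.3]]), H=H, return_all=True)
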
Example #22
    def update_correlated(self, z, R=None, H=None):
        """ Add a new measurement (z) to the Kalman filter assuming that
        process noise and measurement noise are correlated as defined in
        the `self.M` matrix.

        If z is None, nothing is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array,  or None
            Optionally provide H to override the measurement function for this
            one call, otherwise  self.H will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        x = self._x
        P = self._P
        M = self.M

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(H, x)

        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + dot(H, M) + dot(M.T, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot(dot(P, H.T) + M, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)
        self._P = P - dot(K, dot(H, P) + M.T)

        self._S = S
        self._K = K

        # compute log likelihood
        self.log_likelihood = logpdf(z, dot(H, x), S)
Example #23
    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be a column vector.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for this
            one call, otherwise self.H will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        P = self._P
        x = self._x

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        Hx = dot(H, x)

        assert shape(Hx) == shape(z) or (shape(Hx) == (1,1) and shape(z) == (1,)), \
               'shape of z should be {}, but it is {}'.format(
               shape(Hx), shape(z))
        self._y = z - Hx

        # S = HPH' + R
        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot3(P, H.T, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

        self._S = S
        self._K = K

        self.log_likelihood = logpdf(z, dot(H, x), S)
Example #24
    def log_likelihood(self):
        """ log-likelihood of the last measurement (computed lazily). """
        if self._log_likelihood is None:
            self._log_likelihood = logpdf(x=self.y, cov=self.S)
        return self._log_likelihood
Example #25
    def update(self, z, R=None, UT=None, hx=None, **hx_args):
        """
        Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work - you can use x_mean_fn and z_mean_fn to alter the behavior
            of the unscented transform.

        **hx_args : keyword argument
            arguments to be passed into h(x) after x -> h(x, **hx_args)
        """

        if z is None:
            return

        if hx is None:
            hx = self.hx

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        # pass prior sigmas through h(x) to get measurement sigmas
        # the shape of sigmas_h will vary if the shape of z varies, so
        # recreate each time
        sigmas_h = []
        for s in self.sigmas_f:
            sigmas_h.append(hx(s, **hx_args))

        self.sigmas_h = np.atleast_2d(sigmas_h)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean,
                    self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = self.cross_variance(self.x, zp, self.sigmas_f, self.sigmas_h)

        self.K = dot(Pxz, self.inv(Pz))  # Kalman gain
        self.y = self.residual_z(z, zp)  # residual

        # update Gaussian state estimate (x, P)
        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot(self.K, dot(Pz, self.K.T))

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=Pz)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
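
Unlike the tuple-based hx_args variants earlier, this version forwards keyword arguments to hx. A sketch of a measurement function written for that calling convention (the sensor_bias parameter is hypothetical):

import numpy as np

def hx(x, sensor_bias=0.0):
    # position measurement with an optional per-call bias correction
    return np.array([x[0] + sensor_bias])

# keyword arguments after z flow through **hx_args into hx, e.g.:
# ukf.update(np.array([1.2]), hx=hx, sensor_bias=0.05)
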
Example #26
 def dist(self, observation):
     # negative log likelihood (so that smaller is better)
     kf = self._kf
     residual = observation.centroid - self.mean()
     S = np.dot(kf.H, np.dot(kf.P, kf.H.T)) + observation.covariance
     return -logpdf(x=residual, cov=S)
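
Negating the log pdf turns a likelihood into a distance where smaller is better, which is convenient for data association. A minimal sketch (names hypothetical) of ranking candidate residuals this way:

import numpy as np
from filterpy.stats import logpdf

def nll_distance(residual, S):
    # negative log likelihood of a residual under N(0, S); smaller is better
    return -logpdf(x=residual, cov=S)

S = np.array([[2.0, 0.0], [0.0, 2.0]])
candidates = [np.array([0.1, -0.2]), np.array([3.0, 3.5])]
best = min(candidates, key=lambda r: nll_distance(r, S))
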
Example #27
def update(x, P, z, R, H=None, return_all=False):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.

    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    update(1, 2, 1, 1, 1)      # univariate
    update(x, P, z, R, H)      # multivariate

    Parameters
    ----------

    x : numpy.array(dim_x, 1), or float
        State estimate vector

    P : numpy.array(dim_x, dim_x), or float
        Covariance matrix

    z : (dim_z, 1): array_like
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.

    R : numpy.array(dim_z, dim_z), or float
        Measurement noise matrix

    H : numpy.array(dim_z, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    return_all : bool, default False
        If true, y, K, S, and log_likelihood are returned, otherwise
        only x and P are returned.

    Returns
    -------

    x : numpy.array
        Posterior state estimate vector

    P : numpy.array
        Posterior covariance matrix

    y : numpy.array or scalar
        Residual. Difference between measurement and state in measurement space

    K : numpy.array
        Kalman gain

    S : numpy.array
        System uncertainty in measurement space

    log_likelihood : float
        log likelihood of the measurement
    """


    if z is None:
        if return_all:
            return x, P, None, None, None, None
        else:
            return x, P

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    Hx = np.atleast_1d(dot(H, x))
    z = _reshape_z(z, Hx.shape[0], x.ndim)

    # error (residual) between measurement and prediction
    y = z - Hx

    # project system uncertainty into measurement space
    S = dot(dot(H, P), H.T) + R


    # map system uncertainty into kalman gain
    try:
        K = dot(dot(P, H.T), linalg.inv(S))
    except Exception:  # S is scalar; invert it directly
        K = dot(dot(P, H.T), 1. / S)


    # predict new x with residual scaled by the kalman gain
    x = x + dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'
    KH = dot(K, H)


    try:
        I_KH = np.eye(KH.shape[0]) - KH
    except Exception:  # KH is scalar (univariate case)
        I_KH = np.array([1 - KH])
    P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)


    if return_all:
        # compute log likelihood
        log_likelihood = logpdf(z, dot(H, x), S)
        return x, P, y, K, S, log_likelihood
    else:
        return x, P
Example #28
def update(x, P, z, R, H=None, return_all=False):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.

    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    update(1, 2, 1, 1, 1)      # univariate
    update(x, P, z, R, H)      # multivariate

    Parameters
    ----------

    x : numpy.array(dim_x, 1), or float
        State estimate vector

    P : numpy.array(dim_x, dim_x), or float
        Covariance matrix

    z : numpy.array(dim_z, 1), or float
        measurement for this update.

    R : numpy.array(dim_z, dim_z), or float
        Measurement noise matrix

    H : numpy.array(dim_z, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    return_all : bool, default False
        If true, y, K, S, and log_likelihood are returned, otherwise
        only x and P are returned.

    Returns
    -------

    x : numpy.array
        Posterior state estimate vector

    P : numpy.array
        Posterior covariance matrix

    y : numpy.array or scalar
        Residual. Difference between measurement and state in measurement space

    K : numpy.array
        Kalman gain

    S : numpy.array
        System uncertainty in measurement space

    log_likelihood : float
        log likelihood of the measurement
    """


    if z is None:
        if return_all:
            return x, P, None, None, None, None
        else:
            return x, P

    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    if not np.isscalar(x):
        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

    # error (residual) between measurement and prediction
    y = z - dot(H, x)

    # project system uncertainty into measurement space
    S = dot3(H, P, H.T) + R


    # map system uncertainty into kalman gain
    try:
        K = dot3(P, H.T, linalg.inv(S))
    except Exception:  # S is scalar; invert it directly
        K = dot3(P, H.T, 1 / S)


    # predict new x with residual scaled by the kalman gain
    x = x + dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'
    KH = dot(K, H)


    try:
        I_KH = np.eye(KH.shape[0]) - KH
    except Exception:  # KH is scalar (univariate case)
        I_KH = np.array(1 - KH)
    P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)


    if return_all:
        # compute log likelihood
        log_likelihood = logpdf(z, dot(H, x), S)
        return x, P, y, K, S, log_likelihood
    else:
        return x, P
Example #29
    def update_correlated(self, z, R=None, H=None):
        """ Add a new measurement (z) to the Kalman filter assuming that
        process noise and measurement noise are correlated as defined in
        the `self.M` matrix.

        If z is None, nothing is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array,  or None
            Optionally provide H to override the measurement function for this
            one call, otherwise  self.H will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        x = self._x
        P = self._P
        M = self.M

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        self._y = z - dot(H, x)

        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + dot(H, M) + dot(M.T, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot(dot(P, H.T) + M, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)
        self._P = P - dot(K, dot(H, P) + M.T)

        self._S = S
        self._K = K

        # compute log likelihood
        self.log_likelihood = logpdf(z, dot(H, x), S)
Example #30
    def update(self, z, R=None, H=None):
        """
        Add a new measurement (z) to the Kalman filter. If z is None, nothing
        is changed.

        Parameters
        ----------
        z : np.array
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be a column vector.

        R : np.array, scalar, or None
            Optionally provide R to override the measurement noise for this
            one call, otherwise  self.R will be used.

        H : np.array, or None
            Optionally provide H to override the measurement function for this
            one call, otherwise self.H will be used.
        """

        if z is None:
            return

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self.dim_z) * R

        # rename for readability and a tiny extra bit of speed
        if H is None:
            H = self.H
        P = self._P
        x = self._x

        # handle special case: if z is in form [[z]] but x is not a column
        # vector dimensions will not match
        if x.ndim == 1 and shape(z) == (1, 1):
            z = z[0]

        if shape(z) == ():  # is it scalar, e.g. z=3 or z=np.array(3)
            z = np.asarray([z])

        # y = z - Hx
        # error (residual) between measurement and prediction
        Hx = dot(H, x)

        assert shape(Hx) == shape(z) or (shape(Hx) == (1,1) and shape(z) == (1,)), \
               'shape of z should be {}, but it is {}'.format(
               shape(Hx), shape(z))
        self._y = z - Hx

        # S = HPH' + R
        # project system uncertainty into measurement space
        S = dot3(H, P, H.T) + R

        # K = PH'inv(S)
        # map system uncertainty into kalman gain
        K = dot3(P, H.T, linalg.inv(S))

        # x = x + Ky
        # predict new x with residual scaled by the kalman gain
        self._x = x + dot(K, self._y)

        # P = (I-KH)P(I-KH)' + KRK'
        I_KH = self._I - dot(K, H)
        self._P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

        self._S = S
        self._K = K

        self.log_likelihood = logpdf(z, dot(H, x), S)
Example #31
    def update(self, z, R=None, UT=None, hx_args=()):
        """
        Update the UKF with the given measurements. On return,
        self.x and self.P contain the new mean and covariance of the filter.

        Parameters
        ----------

        z : numpy.array of shape (dim_z)
            measurement vector

        R : numpy.array((dim_z, dim_z)), optional
            Measurement noise. If provided, overrides self.R for
            this function call.

        UT : function(sigmas, Wm, Wc, noise_cov), optional
            Optional function to compute the unscented transform for the sigma
            points passed through hx. Typically the default function will
            work - you can use x_mean_fn and z_mean_fn to alter the behavior
            of the unscented transform.

        hx_args : tuple, optional, default ()
            arguments to be passed into Hx function after the required state
            variable.
        """

        if z is None:
            return

        if not isinstance(hx_args, tuple):
            hx_args = (hx_args, )

        if UT is None:
            UT = unscented_transform

        if R is None:
            R = self.R
        elif isscalar(R):
            R = eye(self._dim_z) * R

        for i, s in enumerate(self.sigmas_f):
            self.sigmas_h[i] = self.hx(s, *hx_args)

        # mean and covariance of prediction passed through unscented transform
        zp, Pz = UT(self.sigmas_h, self.Wm, self.Wc, R, self.z_mean,
                    self.residual_z)

        # compute cross variance of the state and the measurements
        Pxz = self.cross_variance(self.x, zp, self.sigmas_f, self.sigmas_h)

        self.K = dot(Pxz, self.inv(Pz))  # Kalman gain
        self.y = self.residual_z(z, zp)  # residual

        # update Gaussian state estimate (x, P)
        self.x = self.x + dot(self.K, self.y)
        self.P = self.P - dot(self.K, dot(Pz, self.K.T))

        if self.compute_log_likelihood:
            self.log_likelihood = logpdf(x=self.y, cov=Pz)
            self.likelihood = math.exp(self.log_likelihood)
            if self.likelihood == 0:
                self.likelihood = sys.float_info.min
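
Several docstrings above note that residual_x/residual_z must be replaced when the state or measurement contains angles, since plain subtraction mishandles wrap-around. A sketch of such a residual for a [range, bearing] measurement (layout assumed for illustration):

import numpy as np

def residual_z_bearing(a, b):
    # residual for a [range, bearing] measurement; wraps the bearing
    # difference into [-pi, pi) so 359 deg - 1 deg becomes -2 deg
    y = np.asarray(a, dtype=float) - np.asarray(b, dtype=float)
    y[1] = (y[1] + np.pi) % (2 * np.pi) - np.pi
    return y

# e.g. UnscentedKalmanFilter(..., residual_z=residual_z_bearing)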