def compute_kf(mu_init, sig_init, times, observations):
    """
  Compute steps of the filter given inital conditions, times, and observations
  mu_init: initial 5 dimensional state
  sig_init: intial 5x5 covariance matrix
  times: list of timestamps in sequence
  observations: 2d list of observations [[for_vel, ang_vel],...,] over sequence
  """
    # Kalman filter update class
    kf = KF()

    # Initial conditions
    prev_time = times[0]
    mu_next = mu_init
    sig_next = sig_init

    # Lists of mu and sigmas at every timestep
    mus = []
    sigmas = []

    ############################ Filter loop  ####################################
    for i in range(1, len(observations)):
        #print("\n Iteration {}".format(i))
        curr_time = times[i]
        dt = curr_time - prev_time
        prev_time = curr_time

        # Run filter with inferred velocities
        z = observations[i - 1]
        mu_next, sig_next = kf.step(mu_next, sig_next, z, dt)

        mus.append(mu_next)
        sigmas.append(sig_next)

    return np.array(mus), np.array(sigmas)
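A hypothetical call, with shapes taken from the docstring and made-up values (KF and np come from the surrounding module):

mu0 = np.zeros(5)                                       # initial 5-dimensional state
sig0 = np.eye(5)                                        # initial 5x5 covariance
ts = [0.0, 0.1, 0.2, 0.3]                               # timestamps
obs = [[1.0, 0.0], [1.0, 0.1], [0.9, 0.1], [0.9, 0.0]]  # [for_vel, ang_vel] pairs
mus, sigmas = compute_kf(mu0, sig0, ts, obs)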
Example #2
    def test_can_predict(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        kf.predict(dt=0.1)
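The test snippets below all exercise the same small interface: KF(initial_x, initial_v, accel_variance) with predict(dt), update(meas_value, meas_variance), and the pos, vel, mean, and cov accessors. As a reading aid, here is a minimal sketch of a 1-D constant-velocity filter satisfying that interface; it is an assumption reconstructed from the tests, not the repository's actual kf.py:

import numpy as np

class KF:
    def __init__(self, initial_x: float, initial_v: float, accel_variance: float):
        self.mean = np.array([initial_x, initial_v])  # state: [position, velocity]
        self.cov = np.eye(2)                          # state covariance
        self.accel_variance = accel_variance

    def predict(self, dt: float):
        F = np.array([[1.0, dt], [0.0, 1.0]])   # constant-velocity transition
        G = np.array([[0.5 * dt**2], [dt]])     # acceleration noise loading
        self.mean = F @ self.mean
        self.cov = F @ self.cov @ F.T + G @ G.T * self.accel_variance

    def update(self, meas_value: float, meas_variance: float):
        H = np.array([[1.0, 0.0]])              # we measure position only
        S = H @ self.cov @ H.T + meas_variance  # innovation covariance (1x1)
        K = self.cov @ H.T @ np.linalg.inv(S)   # Kalman gain (2x1)
        y = meas_value - H @ self.mean          # innovation
        self.mean = self.mean + (K @ y).ravel()
        self.cov = (np.eye(2) - K @ H) @ self.cov

    @property
    def pos(self) -> float:
        return self.mean[0]

    @property
    def vel(self) -> float:
        return self.mean[1]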
Example #3
    def test_after_calling_predict_mean_and_cov_are_of_right_shape(self):
        x = 0.2
        v = 2.3
        kf = KF(x, v, 1.2)
        kf.predict(0.1)

        self.assertEqual(kf.cov.shape, (2,2))
        self.assertEqual(kf.mean.shape, (2, ))
Example #4
    def test_check_update_in_update(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)
        kf.update(meas_value=0.1, meas_variance=0.01)

        self.assertNotEqual(kf.pos, x)
        self.assertAlmostEqual(kf.vel, v)
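Note: the second assertion holds because, with no initial position-velocity correlation in the covariance, a position-only update has zero gain in the velocity row and leaves the velocity estimate unchanged.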
Example #5
    def test_calling_update_does_not_crash(self):
        x = 0.2
        v = 0.8

        kf = KF(x, v, accel_variance=1.2)

        meas_value = 0.1
        meas_variance = 0.1
        kf.update(meas_value, meas_variance)
Example #6
    def test_after_calling_predict_mean_and_cov_are_of_right_shape(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        kf.predict(dt=0.1)
        self.assertEqual(kf.cov.shape, (2, 2))
        self.assertEqual(kf.mean.shape, (2, ))
Example #7
    def test_can_predict_and_mean_and_cov_are_of_right_shape(self):
        x = 0.2
        v = 0.8

        kf = KF(x, v, accel_variance=1.2)
        kf.predict(dt=0.1)

        self.assertEqual(kf.cov.shape, (2, 2))
        self.assertEqual(kf.mean.shape, (2, ))
Example #8
    def test_calling_update_decreases_state_uncertainty(self):
        x = 0.2
        v = 2.3
        kf = KF(x, v, 1.2)

        det_before = np.linalg.det(kf.cov)
        kf.update(0.1, 0.01)
        det_after = np.linalg.det(kf.cov)

        self.assertLess(det_after, det_before)
Example #9
    def test_calling_update_decreases_state_uncertainty(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        det_before = np.linalg.det(kf.cov)
        kf.update(meas_value=0.1, meas_variance=0.01)
        det_after = np.linalg.det(kf.cov)

        self.assertLess(det_after, det_before)
Example #10
    def test_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 2.3
        kf = KF(x, v, 1.2)

        for i in range(10):
            det_before = np.linalg.det(kf.cov)
            kf.predict(0.1)
            det_after = np.linalg.det(kf.cov)

            self.assertGreater(det_after, det_before)
            print(det_before, det_after)
Example #11
    def test_after_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 2.3
        temp = 0
        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)
        for i in range(10):
            # the entropy of the covariance matrix is captured by its determinant;
            # here I expect a growing determinant, since the uncertainty should
            # increase over time.
            det_before = np.linalg.det(kf.cov)
            kf.predict(dt=0.1)
            det_after = np.linalg.det(kf.cov)

            self.assertGreater(det_after, det_before)
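Why predict grows the determinant: for the constant-velocity model det(F) = 1, so the transition alone preserves det(P), and the added process-noise term strictly increases it. A standalone check (a sketch; the 2x2 shapes and the G vector are assumptions matching a 1-D constant-velocity filter):

import numpy as np

dt, accel_variance = 0.1, 1.2
F = np.array([[1.0, dt], [0.0, 1.0]])  # det(F) = 1
G = np.array([[0.5 * dt**2], [dt]])    # process-noise loading
P = np.eye(2)
P_next = F @ P @ F.T + G @ G.T * accel_variance
# matrix determinant lemma: det(A + v v^T) = det(A) * (1 + v^T A^{-1} v) > det(A)
assert np.linalg.det(P_next) > np.linalg.det(P)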
Example #12
    def test_after_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        for i in range(10):
            det_before = np.linalg.det(kf.cov)
            kf.predict(dt=0.1)
            det_after = np.linalg.det(kf.cov)

            self.assertGreater(det_after, det_before)
            print(det_before, det_after)
Example #13
    def test_can_construct_with_x_and_v(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)
        self.assertAlmostEqual(kf.pos, x)
        self.assertAlmostEqual(kf.vel, v)
Example #14
    def test_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 0.8

        kf = KF(x, v, accel_variance=1.2)

        for i in range(10):
            det_cov_before = np.linalg.det(kf.cov)
            #print(det_cov_before)
            #print("---")
            kf.predict(dt=0.1)
            det_cov_after = np.linalg.det(kf.cov)
            #print(det_cov_after)
            #print(" ")

            self.assertGreater(det_cov_after, det_cov_before)
Example #15
    def test_can_construct_with_x_and_v(self):
        x = 0.2
        v = 0.8

        kf = KF(x, v, accel_variance=1.2)
        self.assertAlmostEqual(kf.pos, x)
        self.assertAlmostEqual(kf.vel, v)
Example #16
def _fit_k_lin(nlds, k, kf_i=None):
    data = nlds.data
    fn = nlds.fn
    # linear fit (KF)
    if kf_i is None:
        kf = KF(data, k)  # without init params
    else:
        kf = kf_i  # with init params
    kf.em(ITER, ITERe)  # em algorithm
    nlds.setKFParams(kf)
    # fit si
    nlds = nlds.fit_si()
    if DBG: nlds.plot("%s_%d" % (fn, k))
    (Sta, Obs) = nlds.gen(len(data))
    err = tl.RMSE(Obs, data)
    if DBG: print("nlds:fit_k_lin (k:%d):rmse: %f" % (k, err))
    return (nlds, err)
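A hypothetical call (a sketch; nlds and the ITER/ITERe/DBG globals come from the surrounding module):

nlds_fit, rmse = _fit_k_lin(nlds, k=4)  # fit a 4-dim latent linear model, report RMSE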
Example #17
    def test_calling_update_decreases_state_uncertainty(self):
        x = 0.2
        v = 0.8
        meas_value = 0.1
        meas_variance = 0.1

        kf = KF(x, v, accel_variance=1.2)

        for i in range(10):
            det_cov_before = np.linalg.det(kf.cov)
            #print(det_cov_before)
            #print("---")
            kf.update(meas_value, meas_variance)
            det_cov_after = np.linalg.det(kf.cov)
            #print(det_cov_after)
            #print(" ")

            self.assertLess(det_cov_after, det_cov_before)
Example #18
"""
@author: Praveen
"""

import numpy as np
import matplotlib.pyplot as plt
from kf import KF
plt.ion()  # turn the interactive mode on
plt.figure()  # use plt.figure() when we want to tweak the size of the figure and
# when we want to add multiple Axes objects in a single figure

real_x = 0.0  # encoder angle
meas_variance = 0.1**2  # simulate noise in our measurement
real_v = 0.5

kf = KF(initial_x=0.0, initial_v=1.0, accel_variance=0.1)

DT=0.1
NUM_STEPS=1000
MEAS_EVERY_STEP = 20

mus = []  # list for storing the means
covs = []  # list for storing the covariances
real_xs = []
real_vs = []


for step in range(NUM_STEPS):
    covs.append(kf.cov)
    mus.append(kf.mean)
Example #19
import pdb
import scipy.stats as stats

import matplotlib.pyplot as plt

set_seed(9001)

dt = 1e-3
T = 60.

z = TimeVarying(dt, 0.0, 1.0, f=1 / 20)
F_hat = lambda t: z.F(0)
eta = lambda t: F_hat(t) - z.F(t)

print(F_hat(0))
f1 = KF(z.x0, F_hat, z.H, z.Q, z.R, dt)
f2 = LKF(z.x0, F_hat, z.H, z.Q, z.R, dt, tau=0.25, eps=3e-2, gamma=0.9)

max_err = 2.
max_eta_err = 100
max_zz = 100.

hist_t = []
hist_z = []
hist_f1_x = []
hist_f2_x = []
hist_f1_err = []
hist_f2_err = []
hist_eta = []
while z.t <= T:
    z_t = z()
Example #20
def _track(net, meta, data, output_path):
    first_frame = next(data)
    init_states = {}
    track_id = -1
    # initialize tracker
    detections = _detect_people(net, meta, first_frame)
    for detection in detections:
        track_id += 1
        cx, cy, w, h = detection[2]
        init_states[track_id] = np.array([cx, cy, w * h, w / h, 0, 0, 0])

    filt = KF(init_states)
    _output_tracks(filt.latest_live_states(), first_frame, output_path)

    # process remaining frames
    for frame in data:
        # predict motion of BB for existing tracks
        predictions = filt.predict()
        bbs = list(map(lambda d: d[2], _detect_people(net, meta, frame)))
        keys = list(predictions.keys())
        iter_bounds = max(len(keys), len(bbs))
        # Hungarian method for assignment
        # first build cost matrix
        cost_mat = np.zeros((iter_bounds, iter_bounds))
        for i in range(min(iter_bounds, len(bbs))):
            for j in range(min(iter_bounds, len(keys))):
                cost_mat[i, j] = _iou(bbs[i],
                                      _state_to_bb(predictions[keys[j]]))
        # TODO put optimizer call in for loop condition
        # then solve the optimization problem
        rows, cols = optimize.linear_sum_assignment(cost_mat, maximize=True)
        # assign detections to old or new tracks, as appropriate
        # (r, c) indexes an IOU in cost_mat; r corresponds to a detection bb,
        # c is a track from the previous frame
        assignments = {}
        new_tracks = {}
        for r, c in zip(rows, cols):
            if r < len(bbs):
                cx, cy, w, h = bbs[r]
            else:
                continue
            state = np.array([cx, cy, w * h, w / h, 0, 0, 0])
            if cost_mat[r, c] >= MIN_IOU:  # new detection for existing track
                assignments[keys[c]] = state
            else:  # new track
                track_id += 1
                new_tracks[track_id] = state
        # build the measurements
        #_output_tracks(assignments, frame, output_path, prefix='assignment')
        ys = {}
        for label, last_state in filt.latest_live_states().items():
            if label in assignments:
                astate = assignments[label]
                ys[label] = np.array([
                    astate[0], astate[1], astate[2], astate[3],
                    astate[0] - last_state[0], astate[1] - last_state[1],
                    astate[2] - last_state[2]
                ])
            else:
                filt.kill_state(label)

        filt.update(ys, predictions)
        for label, state in new_tracks.items():
            filt.birth_state(label, state)
        _output_tracks(filt.latest_live_states(), frame, output_path)
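The assignment step above in isolation: linear_sum_assignment with maximize=True picks the detection-to-track pairing with the largest total IOU over the padded square cost matrix. A minimal standalone sketch (the IOU numbers are made up):

import numpy as np
from scipy import optimize

iou = np.array([[0.9, 0.1, 0.0],   # rows: detections
                [0.2, 0.8, 0.0],   # columns: predicted tracks
                [0.0, 0.0, 0.0]])  # zero padding for the unmatched slot
rows, cols = optimize.linear_sum_assignment(iou, maximize=True)
print(rows, cols)  # [0 1 2] [0 1 2]
# pairs whose IOU falls below MIN_IOU are treated as new tracks above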
Example #21
	def __init__(self, x0: np.ndarray, F: Callable, H: np.ndarray, Q: np.ndarray, R: np.ndarray, dt: float, tau: float = float('inf'), eta_mu: float = 0., eta_var: float = 1., eps=1e-4):
		self.F = F
		self.H = H
		self.Q = Q
		self.R = R
		self.dt = dt
		self.tau = tau
		self.eps = eps
		self.eta_mu = eta_mu
		self.eta_var = eta_var
		self.ndim = x0.shape[0]

		self.e_zz_t = np.zeros((self.ndim, self.ndim)) # temp var..
		self.p_inv_t = np.zeros((self.ndim, self.ndim)) # temp var..
		self.kf_err_hist = []
		self.kf = KF(x0, F, H, Q, R, dt)

		def f(t, state, z_t, err_hist, F_t):
			# TODO fix all stateful references in this body

			x_t, P_t, eta_t = self.load_vars(state)
			z_t = z_t[:, np.newaxis]
			K_t = [email protected]@np.linalg.inv(self.R)

			d_eta = np.zeros((self.ndim, self.ndim)) 
			if t > self.tau: # TODO warmup case?
				H_inv = np.linalg.inv(self.H)
				P_kf_t = self.kf.P_t
				P_inv = np.linalg.solve(P_kf_t.T@P_kf_t + self.eps*np.eye(self.ndim), P_kf_t.T)
				self.p_inv_t = P_inv

				tau_n = int(self.tau / self.dt)
				err_t, err_tau = err_hist[-1][:,np.newaxis], err_hist[-tau_n][:,np.newaxis]
				d_zz = (err_t@err_t.T - err_tau@err_tau.T) / self.tau
				self.e_zz_t = d_zz 
				# E_z = sum(err_hist[-tau_n:])[:,np.newaxis] / tau_n
				# d_uu = ((err_t - err_tau)/self.tau)@(E_z.T) + E_z@(((err_t - err_tau)/self.tau).T)
				# self.e_zz_t = d_zz - d_uu

				# if np.linalg.norm(d_zz) >= 1.0:
				# 	d_zz = np.zeros((self.ndim, self.ndim))

				d_eta = (H_inv@d_zz@H_inv.T@P_inv / 2 - eta_t) / self.tau

			F_est = F_t - eta_t
			d_x = F_est@x_t + K_t@(z_t - self.H@x_t)
			d_P = F_est@P_t + P_t@F_est.T + self.Q - K_t@self.R@K_t.T
			d_state = np.concatenate((d_x, d_P, d_eta), axis=1)
			return d_state.ravel() # Flatten for integrator

		def g(t, state):
			return np.zeros((self.ode_ndim, self.ode_ndim))

		# state
		x0 = x0[:, np.newaxis]
		self.x_t = x0
		# covariance
		P0 = np.eye(self.ndim)
		self.P_t = P0
		# model variation
		eta0 = np.zeros((self.ndim, self.ndim))
		self.eta_t = eta0

		iv = np.concatenate((x0, P0, eta0), axis=1).ravel() # Flatten for integrator
		self.r = Integrator(f, g, self.ode_ndim)
		self.r.set_initial_value(iv, 0.)
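For reference, the integrand f above implements the continuous-time (Kalman-Bucy) filter equations, with the estimated model error eta subtracted from F; in the code's notation:

    dx/dt = (F - eta) x + K (z - H x),  with gain  K = P H^T R^(-1)
    dP/dt = (F - eta) P + P (F - eta)^T + Q - K R K^T

d_eta then relaxes eta toward an estimate built from the innovation history over the trailing window of length tau.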
Example #22
import numpy as np
import matplotlib.pyplot as plt

from kf import KF

plt.ion()
plt.figure()

# the actual values

real_x = 0.0
meas_variance = 0.1**2
real_v = 0.5

kf = KF(0.0, 1.0, 0.1)

DT = 0.1
NUM_STEPS = 1000
MEAS_EVERY_STEPS = 20

mus = []
covs = []
real_xs = []
real_vs = []

for step in range(NUM_STEPS):

    # just a little test
    if step > 500:
        real_v *= 0.9
Example #23
	def __init__(self, x0: np.ndarray, F: Callable, H: np.ndarray, Q: np.ndarray, R: np.ndarray, dt: float, tau: float = float('inf'), eta_mu: float = 0., eta_var: float = 1., eps=1e-4):
		self.F = F
		self.H = H
		self.Q = Q
		self.R = R
		self.dt = dt
		self.tau = tau
		self.eps = eps
		self.eta_mu = eta_mu
		self.eta_var = max(eta_var, 1e-3) # to avoid singular matrix
		self.ndim = x0.shape[0]

		self.err_hist = []
		self.eta_t = np.zeros((self.ndim, self.ndim)) # temp var..
		self.e_zz_t = np.zeros((self.ndim, self.ndim)) # temp var..
		self.p_inv_t = np.zeros((self.ndim, self.ndim)) # temp var..
		self.p_t = np.zeros((self.ndim, self.ndim)) # temp var..

		eta0 = eta_mu * np.ones(4)
		eta_F = lambda _: np.zeros((4,4))
		eta_H = np.eye(4)
		eta_Q = np.zeros((4,4))
		eta_R = eta_var*np.eye(4)
		self.eta_filter = KF(eta0, eta_F, eta_H, eta_Q, eta_R, self.dt)

		def f(t, state, z_t, err_hist, F_t):
			# TODO fix all stateful references in this body

			state = state.reshape(self.ode_shape)
			x_t, P_t, eta_t = state[:, :1], state[:, 1:1+self.ndim], state[:, 1+self.ndim:]
			z_t = z_t[:, np.newaxis]
			K_t = [email protected]@np.linalg.inv(self.R)

			d_eta = np.zeros((self.ndim, self.ndim)) 
			if t > self.tau: # TODO warmup case?
				H_inv = np.linalg.inv(self.H)
				P_inv = np.linalg.solve(P_t.T@P_t + self.eps*np.eye(self.ndim), P_t.T)
				self.p_inv_t = P_inv

				tau_n = int(self.tau / self.dt)
				err_t, err_tau = err_hist[-1][:,np.newaxis], err_hist[-tau_n][:,np.newaxis]
				d_zz = (err_t@err_t.T - err_tau@err_tau.T) / self.tau
				self.e_zz_t = d_zz

				# if np.linalg.norm(d_zz) >= 1.0:
				# 	d_zz = np.zeros((self.ndim, self.ndim))

				# E_z = sum(err_hist[-tau_n:])[:,np.newaxis] / tau_n
				# d_uu = ((err_t - err_tau)/self.tau)@(E_z.T) + E_z@(((err_t - err_tau)/self.tau).T)
				# self.e_zz_t = d_zz - d_uu

				d_eta_new = H_inv@d_zz@H_inv.T@P_inv / 2 - eta_t
				# d_eta_new = self.eta_filter(d_eta_new.ravel())[0].reshape((2,2))
				d_eta = d_eta_new / self.tau


				# alpha = self.f_eta(d_eta_new) / self.f_eta(np.zeros((2,2)))
				# if stats.uniform.rvs() <= alpha:
				# 	d_eta = d_eta_new / self.dt

			F_est = F_t - eta_t
			d_x = F_est@x_t + K_t@(z_t - self.H@x_t)
			d_P = F_est@P_t + P_t@F_est.T + self.Q - K_t@self.R@K_t.T
			d_state = np.concatenate((d_x, d_P, d_eta), axis=1)
			return d_state.ravel() # Flatten for integrator

		def g(t, state):
			return np.zeros((self.ode_ndim, self.ode_ndim))

		# state
		x0 = x0[:, np.newaxis]
		self.x_t = x0
		# covariance
		P0 = np.eye(self.ndim)
		self.P_t = P0
		# model variation
		eta0 = np.zeros((self.ndim, self.ndim))
		self.eta_t = eta0

		iv = np.concatenate((x0, P0, eta0), axis=1).ravel() # Flatten for integrator
		self.r = Integrator(f, g, self.ode_ndim)
		self.r.set_initial_value(iv, 0.)
Example #24
    def test_update_ok(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)
        kf.update(meas_value=0.1, meas_variance=0.1)
Example #25
    observations = 20 * (np.sin(x) + 0.005 * np.random.random(n))
    data[:, 0] = observations
    return data
 
#----------------------------------#
#     main 
#----------------------------------#
if __name__ == "__main__":
    from kf import KF
    tl.comment("kf.py -- example ")
    tl.msg("create synthetic sequence (1-dim)")
    data = _example1()
    fn = '../output/tmp/kf_sample'

    k = 2  # or 4
    tl.msg("k=%d: # of latent variables"%(k)) 
    tl.msg("init params")
    mykf = KF(data, k)
    tl.msg("em algorithm -- START")
    tic = tl.time.perf_counter()  # time.clock() was removed in Python 3.8
    mykf.em(ITER=2, iter_each=10)
    toc = tl.time.perf_counter()
    fittime = toc - tic
    tl.msg("em algorithm -- END (%f sec.)"%(fittime))
    tl.msg("plot/save models")
    mykf.plot(fn)
    mykf.printParams(fn)



Example #26
    def test_can_construct_with_x_and_v(self):
        x = 0.2
        v = 2.3
        kf = KF(x, v, 1.2)
        self.assertAlmostEqual(kf.pos, x)
        self.assertAlmostEqual(kf.vel, v)
Example #27
import numpy as np
import matplotlib.pyplot as plt
from SIFT import getSIFT, getMatch
from PIL import Image
from kf import KF
import cv2
plt.ion()
plt.figure()
position = [1, 1, 0, 0, 1, 1, 0, 0]
velocity = [0, 0, 1, 1, 0, 0, 1, 1]
frames = []
meas_variance = np.random.rand(4, 4) * 0.01

initial_state = np.array([200, 113, 0, 0, 45, 45, 0, 0]).T
accel_variance = np.ones((8, 8))  # *np.random.rand()*0.1
kf = KF(initial_state=initial_state, accel_variance=accel_variance)

# [real_X, real_Y, real_W, real_H] = initial_state[position]

DT = 0.008
NUM_STEPS = 200
# MEAS_EVERY_STEPS = 20

mus = []
covs = []
real_xs = []
real_vs = []
im = Image.open("./image.jpg")
prevImg = cv2.imread("./Vid_A_ball/img0002.jpg")
prevMask = np.zeros((prevImg.shape[0], prevImg.shape[1])).astype(np.uint8)
x = 200