Example #1
    def test_can_predict(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        kf.predict(dt=0.1)
Example #2
    def test_after_calling_predict_mean_and_cov_are_of_right_shape(self):
        x = 0.2
        v = 2.3
        kf = KF(x, v, 1.2)
        kf.predict(0.1)

        self.assertEqual(kf.cov.shape, (2, 2))
        self.assertEqual(kf.mean.shape, (2, ))
Example #3
    def test_after_calling_predict_mean_and_cov_are_of_right_shape(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        kf.predict(dt=0.1)
        self.assertEqual(kf.cov.shape, (2, 2))
        self.assertEqual(kf.mean.shape, (2, ))
Example #4
    def test_can_predict_and_mean_and_cov_are_of_right_shape(self):
        x = 0.2
        v = 0.8

        kf = KF(x, v, accel_variance=1.2)
        kf.predict(dt=0.1)

        self.assertEqual(kf.cov.shape, (2, 2))
        self.assertEqual(kf.mean.shape, (2, ))
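A note on the shape assertions above: they imply a scalar constant-velocity model, where the state mean is the 2-vector [position, velocity] and the covariance is a 2x2 matrix. A minimal KF constructor consistent with those assertions might look like the sketch below; the attribute names mean and cov and the constructor arguments are taken from the examples, while the identity initial covariance and the private field names are assumptions.

import numpy as np

class KF:
    def __init__(self, initial_x, initial_v, accel_variance):
        # state mean: [position, velocity]
        self._mean = np.array([initial_x, initial_v], dtype=float)
        # state covariance: 2x2 (identity is only a placeholder choice here)
        self._cov = np.eye(2)
        self._accel_variance = accel_variance

    @property
    def mean(self):
        return self._mean

    @property
    def cov(self):
        return self._cov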
Example #5
    def test_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 2.3
        kf = KF(x, v, 1.2)

        for i in range(10):
            det_before = np.linalg.det(kf.cov)
            kf.predict(0.1)
            det_after = np.linalg.det(kf.cov)

            self.assertGreater(det_after, det_before)
            print(det_before, det_after)
Example #6
    def test_after_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 2.3
        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)
        for i in range(10):
            # The entropy of the covariance matrix is expressed by its determinant;
            # here I expect the determinant to grow,
            # since the uncertainty should keep increasing over time.
            det_before = np.linalg.det(kf.cov)
            kf.predict(dt=0.1)
            det_after = np.linalg.det(kf.cov)

            self.assertGreater(det_after, det_before)
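The comment in the test above relies on predict() only ever inflating the covariance. For a constant-velocity model the usual prediction step is P' = F P F^T + G G^T * accel_variance; det(F) = 1, so the first term preserves det(P), and the added process-noise term is positive semidefinite, so the determinant grows at every step, which is exactly what the assertion checks. A standalone numpy sketch of that step (F and G are the textbook choice, not taken from the example code):

import numpy as np

dt = 0.1
accel_variance = 1.2

# constant-velocity transition and acceleration-noise gain
F = np.array([[1.0, dt],
              [0.0, 1.0]])
G = np.array([[0.5 * dt ** 2],
              [dt]])

P = np.eye(2)  # some positive-definite covariance
P_new = F @ P @ F.T + G @ G.T * accel_variance

# det(F) == 1 preserves det(P) under F P F^T; the rank-1 noise term then
# strictly increases it as long as P is positive definite.
print(np.linalg.det(P), np.linalg.det(P_new))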
Example #7
    def test_after_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 2.3

        kf = KF(initial_x=x, initial_v=v, accel_variance=1.2)

        for i in range(10):
            det_before = np.linalg.det(kf.cov)
            kf.predict(dt=0.1)
            det_after = np.linalg.det(kf.cov)

            self.assertGreater(det_after, det_before)
            print(det_before, det_after)
Example #8
    def test_calling_predict_increases_state_uncertainty(self):
        x = 0.2
        v = 0.8

        kf = KF(x, v, accel_variance=1.2)

        for i in range(10):
            det_cov_before = np.linalg.det(kf.cov)
            #print(det_cov_before)
            #print("---")
            kf.predict(dt=0.1)
            det_cov_after = np.linalg.det(kf.cov)
            #print(det_cov_after)
            #print(" ")

            self.assertGreater(det_cov_after, det_cov_before)
Example #9
MEAS_EVERY_STEP = 20

mus = []   # list for storing the mean at each step
covs = []  # list for storing the covariance at each step
real_xs = []
real_vs = []


for step in range(NUM_STEPS):
    covs.append(kf.cov)
    mus.append(kf.mean)

    
    real_x = real_x + DT * real_v
    
    kf.predict(dt=DT)
    if step != 0 and step % MEAS_EVERY_STEP == 0:
        kf.update(meas_value=real_x + np.random.randn() * np.sqrt(meas_variance),
                  meas_variance=meas_variance)
    real_xs.append(real_x)
    real_vs.append(real_v)
"""     
plt.subplot(2,1,1)
plt.title("Position")
plt.plot([mu[0] for mu in mus], 'r')  # 'r' draws the estimated position in red
plt.plot(real_xs, 'b')                # 'b' draws the true position in blue
# +/- 2 sigma uncertainty band around the estimated position ('r--' = dashed red)
plt.plot([mu[0] - 2 * np.sqrt(cov[0, 0]) for mu, cov in zip(mus, covs)], 'r--')
plt.plot([mu[0] + 2 * np.sqrt(cov[0, 0]) for mu, cov in zip(mus, covs)], 'r--')

"""
Example #10
def _track(net, meta, data, output_path):
    first_frame = next(data)
    init_states = {}
    track_id = -1
    # initialize tracker
    detections = _detect_people(net, meta, first_frame)
    for detection in detections:
        track_id += 1
        cx, cy, w, h = detection[2]
        init_states[track_id] = np.array([cx, cy, w * h, w / h, 0, 0, 0])

    filt = KF(init_states)
    _output_tracks(filt.latest_live_states(), first_frame, output_path)

    # process remaining frames
    for frame in data:
        # predict motion of BB for existing tracks
        predictions = filt.predict()
        bbs = list(map(lambda d: d[2], _detect_people(net, meta, frame)))
        keys = list(predictions.keys())
        iter_bounds = max(len(keys), len(bbs))
        # Hungarian method for assignment
        # first build cost matrix
        cost_mat = np.zeros((iter_bounds, iter_bounds))
        for i in range(min(iter_bounds, len(bbs))):
            for j in range(min(iter_bounds, len(keys))):
                cost_mat[i, j] = _iou(bbs[i],
                                      _state_to_bb(predictions[keys[j]]))
        # TODO put optimizer call in for loop condition
        # then solve the optimization problem
        rows, cols = optimize.linear_sum_assignment(cost_mat, maximize=True)
        # assign detections to old or new tracks, as appropriate
        # (r, c) indexes an IOU in cost_mat, r corresponds to a detection bb
        # c is a track from the previous frame
        assignments = {}
        new_tracks = {}
        for r, c in zip(rows, cols):
            if r < len(bbs):
                cx, cy, w, h = bbs[r]
            else:
                continue
            state = np.array([cx, cy, w * h, w / h, 0, 0, 0])
            if cost_mat[r, c] >= MIN_IOU:  # new detection for existing track
                assignments[keys[c]] = state
            else:  # new track
                track_id += 1
                new_tracks[track_id] = state
        # build the measurements
        #_output_tracks(assignments, frame, output_path, prefix='assignment')
        ys = {}
        for label, last_state in filt.latest_live_states().items():
            if label in assignments:
                astate = assignments[label]
                ys[label] = np.array([
                    astate[0], astate[1], astate[2], astate[3],
                    astate[0] - last_state[0], astate[1] - last_state[1],
                    astate[2] - last_state[2]
                ])
            else:
                filt.kill_state(label)

        filt.update(ys, predictions)
        for label, state in new_tracks.items():
            filt.birth_state(label, state)
        _output_tracks(filt.latest_live_states(), frame, output_path)
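The cost matrix above holds IOU scores between detected boxes and predicted tracks, both in (cx, cy, w, h) center form, and scipy's linear_sum_assignment then maximizes the total overlap. The _iou helper itself is not shown in the example; a typical implementation for center-format boxes might look like this sketch (an assumption, not the author's code):

def _iou(box_a, box_b):
    """Intersection-over-union of two (cx, cy, w, h) boxes."""
    ax1, ay1 = box_a[0] - box_a[2] / 2, box_a[1] - box_a[3] / 2
    ax2, ay2 = box_a[0] + box_a[2] / 2, box_a[1] + box_a[3] / 2
    bx1, by1 = box_b[0] - box_b[2] / 2, box_b[1] - box_b[3] / 2
    bx2, by2 = box_b[0] + box_b[2] / 2, box_b[1] + box_b[3] / 2

    # intersection rectangle (zero area if the boxes do not overlap)
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)

    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - inter
    return inter / union if union > 0 else 0.0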
Example #11
mus = []
covs = []
real_xs = []
real_vs = []

for step in range(NUM_STEPS):

    # just a little test: slow the true velocity down after step 500
    if step > 500:
        real_v *= 0.9

    covs.append(kf.cov)
    mus.append(kf.mean)

    real_x = real_x + DT * real_v

    kf.predict(DT)
    if step != 0 and step % MEAS_EVERY_STEPS == 0:
        kf.update(real_x + np.random.randn() * np.sqrt(meas_variance),
                  meas_variance)

    real_xs.append(real_x)
    real_vs.append(real_v)

plt.subplot(2, 1, 1)
plt.title('Position')
plt.plot([mu[0] for mu in mus], 'r')
plt.plot(real_xs, 'b')
plt.plot([mu[0] - 2 * np.sqrt(cov[0, 0]) for mu, cov in zip(mus, covs)], 'r--')
plt.plot([mu[0] + 2 * np.sqrt(cov[0, 0]) for mu, cov in zip(mus, covs)], 'r--')

plt.subplot(2, 1, 2)