Example #1

import numpy as np
import matplotlib.pyplot as plt
from numpy import sin, cos

# The Visualization helper is assumed to come from the course-provided module
# (not shown in this excerpt).
from Visualization import Visualization

class EKFSLAM(object):
    # Construct an EKF instance with the following set of variables
    #    mu:                 The initial mean vector
    #    Sigma:              The initial covariance matrix
    #    R:                  The process noise covariance
    #    Q:                  The measurement noise covariance
    #    visualize:          Boolean variable indicating whether to visualize
    #                        the filter
    def __init__(self, mu, Sigma, R, Q, visualize=True):
        self.mu = mu
        self.Sigma = Sigma
        self.R = R
        self.Q = Q
        self.nfeatures = 0

        # step 6
        # expand matrix to 3,3
        #self.Q = np.pad(self.Q, ((0,1),(0,1)), mode = 'constant',
        #                                 constant_values = 0.0)

        # You may find it useful to keep a dictionary that maps a
    # feature ID to the corresponding index in the mean_pose_handle
        # vector and covariance matrix
        self.mapLUT = {}

        self.visualize = visualize
        if self.visualize:
            self.vis = Visualization()
        else:
            self.vis = None

    # Visualize filter strategies
    #   deltat:  Step size
    #   XGT:     Array with ground-truth pose
    def render(self, XGT=None):
        deltat = 0.1
        self.vis.drawEstimates(self.mu, self.Sigma)
        if XGT is not None:
            # print(XGT)
            self.vis.drawGroundTruthPose(XGT[0], XGT[1], XGT[2])
        plt.pause(deltat)

    def mu_extended(self, mu, n):
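        # Appends n zero placeholder entries to the state vector, e.g. to make
        # room for a new landmark's (x, y) estimate before it is filled in.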
        placeholders = np.array([[0.0] * n]).ravel()
        return np.concatenate([mu, placeholders])

    # Perform the prediction step to determine the mean and covariance
    # of the posterior belief given the current estimate for the mean
    # and covariance, the control data, and the process model
    #    u: The forward distance and change in heading
    def prediction(self, u):
        u1, u2 = u
        v1, v2 = np.random.multivariate_normal([0.0, 0.0], self.R)

        # 10.2.3
        # F_x embeds the 3-D pose block into the full state; nfeatures counts
        # landmark entries in the state (two per landmark, see update())
        F_x = np.concatenate(
            [np.eye(3), np.zeros((3, self.nfeatures))], axis=1)

        # put mu in higher dimensional space
        #mu = self.mu_extended(self.mu)

        mu = self.mu.ravel()[:3]

        x, y, theta = mu

        noise_jac = np.array([[cos(theta), 0], [sin(theta), 0], [0, 1]])

        # pose increment from the noisy motion model (eq. 8 in the pset,
        # Sec. 10.2 in the book); added to the full state through F_x below
        mot = np.array([(u1 + v1) * cos(theta),
                        (u1 + v1) * sin(theta),
                        u2 + v2])

        # jacobian of motion model f(x,y,d,t)
        #F = np.array([[1.0, 0.0, np.cos(self.mu[2]), -(u1 + v1) * np.sin(self.mu[2])],
        #             [0.0, 1.0, np.sin(self.mu[2]), (u1 + v1) * np.cos(self.mu[2])],
        #             [0.0, 0.0, 1.0, 1.0]])
        # 10.16
        # low-dimensional Jacobian of the motion model w.r.t. the pose
        # (only the heading column is nonzero; the identity is added in G_t)
        g_t = np.array([[0.0, 0.0, -(u1 + v1) * sin(theta)],
                        [0.0, 0.0, (u1 + v1) * cos(theta)],
                        [0.0, 0.0, 0.0]])

        #sigma_new = np.matmul(F, self.Sigma)
        #sigma_new = np.matmul(sigma_new, F.T)
        #self.Sigma = sigma_new + self.R

        # noise jacobian

        # following from pp 314 in prob. robotics, s10.2

        mubar_t = self.mu.ravel() + np.matmul(F_x.T, mot)  # 10.2.4

        I = np.eye(3 + self.nfeatures)

        # step 4
        G_t = I + np.matmul(np.matmul(F_x.T, g_t), F_x)
        R_t = np.matmul(noise_jac, np.matmul(self.R, noise_jac.T))
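        # R_t maps the 2-D control noise covariance R into pose space via the
        # noise Jacobian; the full-state covariance is then projected through
        # G_t, with the process noise added only to the pose block via F_x.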

        sigmabar_t = (np.matmul(np.matmul(G_t, self.Sigma), G_t.T) +
                      np.matmul(np.matmul(F_x.T, R_t), F_x))

        self.Sigma = sigmabar_t
        self.mu = mubar_t
        print(self.mu.shape)

    # Perform the measurement update step to compute the posterior
    # belief given the predictive posterior (mean and covariance) and
    # the measurement data
    #    z:     The (x,y) position of the landmark relative to the robot, and
    #    i:     The ID of the observed landmark
    def update(self, z, i):
        z_x, z_y = z

        x, y, theta = self.mu.ravel()[:3]

        if i not in self.mapLUT:
            w1, w2 = np.random.multivariate_normal([0.0, 0.0], self.Q)
            i_loc = self.mu.shape[0]
            self.mapLUT[i] = {'x': i_loc, 'y': i_loc + 1}
            self.mu = self.mu_extended(self.mu, 2)
            self.nfeatures += 2
            # initialize the landmark with the inverse measurement model
            self.mu[i_loc] = x + cos(theta) * z_x - sin(theta) * z_y + w1
            self.mu[i_loc + 1] = y + sin(theta) * z_x + cos(theta) * z_y + w2
            # grow the covariance to match, giving the new landmark a large
            # initial uncertainty (a simple initialization choice)
            self.Sigma = np.pad(self.Sigma, ((0, 2), (0, 2)), mode='constant',
                                constant_values=0.0)
            self.Sigma[i_loc, i_loc] = 1000.0
            self.Sigma[i_loc + 1, i_loc + 1] = 1000.0

        x_idx = self.mapLUT[i]['x']
        y_idx = self.mapLUT[i]['y']

        # vector from the robot to the landmark estimate; its body-frame
        # projection below gives the predicted measurement
        delta = np.array([self.mu[x_idx] - x, self.mu[y_idx] - y])
        dx, dy = delta

        # Jacobian of the measurement model z = R(theta)^T (m - p) with
        # respect to the pose (columns 0-2) and this landmark (x_idx, y_idx)
        H = np.zeros((2, self.mu.shape[0]))
        H[0, 0:3] = [-cos(theta), -sin(theta), -dx * sin(theta) + dy * cos(theta)]
        H[1, 0:3] = [sin(theta), -cos(theta), -dx * cos(theta) - dy * sin(theta)]
        H[0, x_idx], H[0, y_idx] = cos(theta), sin(theta)
        H[1, x_idx], H[1, y_idx] = -sin(theta), cos(theta)

        right = np.linalg.inv(
            np.matmul(np.matmul(H, self.Sigma), H.T) + self.Q)
        left = np.matmul(self.Sigma, H.T)

        K = np.matmul(left, right)

        # innovation: observed minus predicted body-frame landmark position
        z_hat = np.array([cos(theta) * dx + sin(theta) * dy,
                          -sin(theta) * dx + cos(theta) * dy])
        innovation = np.asarray(z, dtype=float).ravel() - z_hat

        self.mu = self.mu + np.matmul(K, innovation)
        self.Sigma = np.matmul(np.eye(self.mu.shape[0]) - np.matmul(K, H),
                               self.Sigma)

    # Augment the state vector to include the new landmark
    #    z:     The (x,y) position of the landmark relative to the robot
    #    i:     The ID of the observed landmark
    def augmentState(self, z, i):

        # Note: run() currently lets update() initialize unseen landmarks, so
        # this method is an alternative augmentation path.
        if i in self.mapLUT:
            return

        x, y, theta = self.mu.ravel()[:3]
        z1, z2 = z

        # Jacobian of the inverse measurement model with respect to the pose
        G = np.zeros((2, self.mu.shape[0]))
        G[0, 0] = 1.0
        G[1, 1] = 1.0
        G[0, 2] = -z1 * np.sin(theta) - z2 * np.cos(theta)
        G[1, 2] = z1 * np.cos(theta) - z2 * np.sin(theta)

        # absolute landmark position from the inverse measurement model
        i_loc = self.mu.shape[0]
        self.mapLUT[i] = {'x': i_loc, 'y': i_loc + 1}
        m_x = x + np.cos(theta) * z1 - np.sin(theta) * z2
        m_y = y + np.sin(theta) * z1 + np.cos(theta) * z2
        self.mu = np.concatenate([self.mu, [m_x, m_y]])
        self.nfeatures += 2

        # standard block augmentation of the covariance
        top = np.concatenate([self.Sigma, np.matmul(self.Sigma, G.T)], axis=1)
        bottom = np.concatenate(
            [np.matmul(G, self.Sigma),
             np.matmul(np.matmul(G, self.Sigma), G.T) + self.Q], axis=1)
        self.Sigma = np.concatenate([top, bottom], axis=0)

    # Runs the EKF SLAM algorithm
    #   U:        Array of control inputs, one column per time step
    #   Z:        Array of landmark observations in which each column
    #             [t; id; x; y] denotes a separate measurement and is
    #             represented by the time step (t), feature id (id),
    #             and the observed (x, y) position relative to the robot
    #   XGT:      Array of ground-truth poses (may be None)
    def run(self, U, Z, XGT=None, MGT=None):

        # Draws the ground-truth map
        if MGT is not None:
            self.vis.drawMap(MGT)

        # Iterate over the data
        for t in range(U.shape[1]):
            u = U[:, t]
            z = Z[:, np.where(Z[0, :] == t)]

            self.prediction(u)

            for k in range(z.shape[2]):
                obs = z[:, :, k].ravel()
                self.update(obs[2:], int(obs[1]))

            # You may want to call the visualization function
            # between filter steps
            if self.visualize:
                if XGT is None:
                    self.render(None)
                else:
                    self.render(XGT[:, t])
Example #2

import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt

# The Visualization helper is assumed to come from the course-provided module
# (not shown in this excerpt).
from Visualization import Visualization

class EKFSLAM(object):
    # Construct an EKF instance with the following set of variables
    #    mu:                 The initial mean vector
    #    Sigma:              The initial covariance matrix
    #    R:                  The process noise covariance
    #    Q:                  The measurement noise covariance
    #    visualize:          Boolean variable indicating whether to visualize
    #                        the filter
    def __init__(self, mu, Sigma, R, Q, visualize=True):
        self.mu = mu
        self.Sigma = Sigma
        self.R = R
        self.Q = Q

        # You may find it useful to keep a dictionary that maps a
        # feature ID to the corresponding index in the mean_pose_handle
        # vector and covariance matrix
        self.mapLUT = {}

        self.visualize = visualize
        if self.visualize:
            self.vis = Visualization()
        else:
            self.vis = None




    # Visualize filter strategies
    #   deltat:  Step size
    #   XGT:     Array with ground-truth pose
    def render(self, XGT=None):
        deltat = 0.1
        self.vis.drawEstimates(self.mu, self.Sigma)
        if XGT is not None:
            # print(XGT)
            self.vis.drawGroundTruthPose(XGT[0], XGT[1], XGT[2])
        plt.pause(deltat/10)





    # Perform the prediction step to determine the mean and covariance
    # of the posterior belief given the current estimate for the mean
    # and covariance, the control data, and the process model
    #    u:                 The forward distance and change in heading
    def prediction(self, u):

        # Jacobian of the motion model, evaluated at the current mean estimate
        F = np.zeros((3, 3))
        F[0, 0] = 1
        F[1, 1] = 1
        F[2, 2] = 1
        F[0, 2] = -1 * u[0] * np.sin(self.mu[2])
        F[1, 2] = u[0] * np.cos(self.mu[2])

        u1 = u[0]
        u2 = u[1]
        self.mu[0] = self.mu[0] + u1*np.cos(self.mu[2])
        self.mu[1] = self.mu[1] + u1*np.sin(self.mu[2])
        self.mu[2] = self.mu[2] + u2
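        # The covariance is updated block-wise below: the pose-pose block gets
        # F Sigma F^T with process-noise terms added on its diagonal, the
        # pose-landmark cross blocks are rotated through F, and the
        # landmark-landmark block is left unchanged by the prediction.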

        upper_left = self.Sigma[0:3, 0:3]
        upper_right = self.Sigma[0:3, 3:]
        bot_left = self.Sigma[3:, 0:3]
        bot_right = self.Sigma[3:, 3:]

        temp = np.matmul(F, upper_left)
        temp = np.matmul(temp, np.transpose(F))
        temp[0, 0] = temp[0, 0] + self.R[0, 0]
        temp[1, 1] = temp[1, 1] + self.R[0, 0]
        temp[2, 2] = temp[2, 2] + self.R[1, 1]
        upper_left = temp

        upper_right = np.matmul(F, upper_right)

        bot_left = np.matmul(bot_left, np.transpose(F))

        up = np.concatenate((upper_left, upper_right), axis=1)
        bot = np.concatenate((bot_left, bot_right), axis=1)
        if not bot.shape[1] == 0:
            self.Sigma = np.concatenate((up, bot), axis=0)

        



    # Perform the measurement update step to compute the posterior
    # belief given the predictive posterior (mean and covariance) and
    # the measurement data
    #    z:     The (x,y) position of the landmark relative to the robot
    #    i:     The ID of the observed landmark
    def update(self, z, i):

        mIdx = self.mapLUT[i]
        xt = self.mu[0]
        yt = self.mu[1]
        thetat = self.mu[2]
        xm = self.mu[mIdx]
        ym = self.mu[mIdx+1]

        H = np.zeros((2, self.mu.shape[0]))
        H[0, 0] = (-1)*np.cos(thetat)
        H[0, 1] = (-1)*np.sin(thetat)
        H[0, 2] = (-1)*xm*np.sin(thetat) + xt*np.sin(thetat) + ym*np.cos(thetat) - yt*np.cos(thetat)
        H[0, mIdx] = np.cos(thetat)
        H[0, mIdx+1] = np.sin(thetat)

        H[1, 0] = np.sin(thetat)
        H[1, 1] = (-1)*np.cos(thetat)
        H[1, 2] = (-1)*xm*np.cos(thetat) + xt*np.cos(thetat) - ym*np.sin(thetat) + yt*np.sin(thetat)
        H[1, mIdx] = (-1)*np.sin(thetat)
        H[1, mIdx+1] = np.cos(thetat)
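        # H is the Jacobian of the measurement model
        # h(mu) = R(thetat)^T ([xm, ym] - [xt, yt]) with respect to the full
        # state; only the pose columns and this landmark's columns are nonzero.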

        inv_temp = np.matmul(H, self.Sigma)
        inv_temp = np.matmul(inv_temp, np.transpose(H))
        inv_temp = inv_temp + self.Q
        temp = np.matmul(self.Sigma, np.transpose(H))
        k = np.matmul(temp, inv(inv_temp))

        temp0 = xm*np.cos(thetat) - xt*np.cos(thetat) + ym*np.sin(thetat) - yt*np.sin(thetat)
        temp1 = (-1)*xm*np.sin(thetat) + xt*np.sin(thetat) + ym*np.cos(thetat) - yt*np.cos(thetat)
        hu = np.array([temp0[0], temp1[0]])
        gain = np.matmul(k, z - hu)
        gain = np.reshape(gain, (self.mu.shape[0], 1))

        self.mu = self.mu + gain

        I = np.eye(self.mu.shape[0])
        temp = np.matmul(k, H)
        temp = I - temp
        self.Sigma = np.matmul(temp, self.Sigma)



    # Augment the state vector to include the new landmark
    #    z:     The (x,y) position of the landmark relative to the robot
    #    i:     The ID of the observed landmark
    def augmentState(self, z, i):

        self.mapLUT[i] = 3 + 2*len(self.mapLUT)
        G = np.zeros((2, self.mu.shape[0]))
        G[0, 0] = 1
        G[0, 2] = (-1)*z[0] * np.sin(self.mu[2]) - z[1] * np.cos(self.mu[2])
        G[1, 1] = 1
        G[1, 2] = z[0] * np.cos(self.mu[2]) - z[1] * np.sin(self.mu[2])
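        # G is the Jacobian of the inverse measurement model (the world-frame
        # landmark position as a function of the pose) with respect to the
        # state; only the pose columns are nonzero. It builds the new
        # covariance blocks below.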

        xm = np.array([self.mu[0] + z[0] * np.cos(self.mu[2]) - z[1] * np.sin(self.mu[2])])
        ym = np.array([self.mu[1] + z[0] * np.sin(self.mu[2]) + z[1] * np.cos(self.mu[2])])

        self.mu = np.concatenate((self.mu, xm), axis=0)
        self.mu = np.concatenate((self.mu, ym), axis=0)

        
        upper_left = self.Sigma

        upper_right = np.matmul(self.Sigma, np.transpose(G))

        bot_left = np.matmul(G, self.Sigma)

        temp = np.matmul(G, self.Sigma)
        temp = np.matmul(temp, np.transpose(G))
        bot_right = temp + self.Q

        up = np.concatenate((upper_left, upper_right), axis=1)
        bot = np.concatenate((bot_left, bot_right), axis=1)
        if not bot.shape[1] == 0:
            self.Sigma = np.concatenate((up, bot), axis=0)


        # Update mapLUT to include the new landmark



    # Runs the EKF SLAM algorithm
    #   U:        Array of control inputs, one column per time step
    #   Z:        Array of landmark observations in which each column
    #             [t; id; x; y] denotes a separate measurement and is
    #             represented by the time step (t), feature id (id),
    #             and the observed (x, y) position relative to the robot
    #   XGT:      Array of ground-truth poses (may be None)
    def run(self, U, Z, XGT=None, MGT=None):
   
        # Draws the ground-truth map
        if MGT is not None:
            self.vis.drawMap(MGT)

        print("init")
        print(self.mu)
        print(self.R)
        print(self.Q)
        # Iterate over the data
        zIdx = 0
        for t in range(U.shape[1]):
        #for t in range(0, 1):
            print(t)
            u = U[:,t]
            self.prediction(u)
            # fold in every measurement time-stamped with this step
            while zIdx < Z.shape[1] and Z[0, zIdx] == t:
                if Z[1, zIdx] in self.mapLUT:
                    self.update(Z[2:, zIdx], Z[1, zIdx])
                else:
                    self.augmentState(Z[2:, zIdx], Z[1, zIdx])
                zIdx = zIdx + 1


            # You may want to call the visualization function
            # between filter steps
            if self.visualize:
                if XGT is None:
                    self.render(None)
                else:
                    self.render(XGT[:, t])
        plt.savefig("small_noise")
Example #3

import numpy as np
import matplotlib.pyplot as plt

# The Visualization helper is assumed to come from the course-provided module
# (not shown in this excerpt).
from Visualization import Visualization

# Dimension of the robot pose block [x, y, theta] at the front of the state
STATE_LEN = 3

class EKFSLAM(object):
    # Construct an EKF instance with the following set of variables
    #    mu:                 The initial mean vector
    #    Sigma:              The initial covariance matrix
    #    R:                  The process noise covariance
    #    Q:                  The measurement noise covariance
    #    visualize:          Boolean variable indicating whether to visualize
    #                        the filter
    def __init__(self, mu, Sigma, R, Q, visualize=True):
        self.mu = mu
        self.Sigma = Sigma
        self.R = R
        self.Q = Q

        # Maps feature IDs to row indices in mean matrix
        self.mapLUT = {}

        self.visualize = visualize
        if self.visualize:
            self.vis = Visualization()
        else:
            self.vis = None

    # Visualize filter strategies
    #   deltat:  Step size
    #   XGT:     Array with ground-truth pose
    def render(self, XGT=None):
        deltat = 0.1
        self.vis.drawEstimates(self.mu, self.Sigma)
        if XGT is not None:
            self.vis.drawGroundTruthPose(XGT[0], XGT[1], XGT[2])
        plt.pause(deltat)
        plt.savefig("res.png")

    # Compute z_t according to the measurement model
    def compute_zt(self, x_t, y_t, theta_t, x_m, y_m):
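        # i.e. z_t = R(theta_t)^T ([x_m, y_m]^T - [x_t, y_t]^T): the landmark
        # position expressed in the robot's body frame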
        sn = np.sin(theta_t)
        cs = np.cos(theta_t)

        return np.array([[cs * (x_m - x_t) + sn * (y_m - y_t)],
                         [-sn * (x_m - x_t) + cs * (y_m - y_t)]])

    # Perform the prediction step to determine the mean and covariance
    # of the posterior belief given the current estimate for the mean
    # and covariance, the control data, and the process model
    #    u:                 The forward distance and change in heading
    def prediction(self, u):

        u1 = u[0]
        u2 = u[1]
        theta = self.mu[2, 0]
        sn = np.sin(theta)
        cs = np.cos(theta)

        # Project state ahead
        self.mu[0] = self.mu[0] + u1 * cs
        self.mu[1] = self.mu[1] + u1 * sn
        self.mu[2] = self.mu[2] + u2
        # F and G below are linearized at the prior heading (sn, cs above)

        F = np.array([[1., 0., -u1 * sn], [0., 1., u1 * cs], [0., 0., 1.]],
                     dtype=float)

        G = np.array([[cs, 0.], [sn, 0.], [0., 1.]], dtype=float)
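        # G maps the control-space noise on [d, dtheta] into pose space, so
        # the process noise enters the covariance below as G @ self.R @ G.T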

        # Project error covariance ahead: the pose block and the pose-landmark
        # cross-covariances change; the landmark-landmark block is untouched
        self.Sigma[:STATE_LEN, :STATE_LEN] = (
            F @ self.Sigma[:STATE_LEN, :STATE_LEN] @ F.T + G @ self.R @ G.T)
        self.Sigma[:STATE_LEN, STATE_LEN:] = F @ self.Sigma[:STATE_LEN, STATE_LEN:]
        self.Sigma[STATE_LEN:, :STATE_LEN] = self.Sigma[STATE_LEN:, :STATE_LEN] @ F.T

    # Perform the measurement update step to compute the posterior
    # belief given the predictive posterior (mean and covariance) and
    # the measurement data
    #    z:     The (x,y) position of the landmark relative to the robot
    #    i:     The ID of the observed landmark
    def update(self, z, i):

        z = np.array(z).reshape((2, 1))
        x_t, y_t, theta_t = self.mu[0, 0], self.mu[1, 0], self.mu[2, 0]

        # Augment the state if this was the first time seeing the landmark
        if i not in self.mapLUT:
            self.augmentState(z, i)
            return

        # Otherwise, we've seen the landmark before, so do an update

        # Compute H
        cs = np.cos(theta_t)
        sn = np.sin(theta_t)
        h_ind = self.mapLUT[i]
        x_m, y_m = self.mu[3 + 2 * h_ind, 0], self.mu[3 + 2 * h_ind + 1, 0]

        hl = np.array([[-cs, -sn, -x_m * sn + x_t * sn + y_m * cs - y_t * cs],
                       [sn, -cs, -x_m * cs + x_t * cs - y_m * sn + y_t * sn]])
        H = np.hstack((hl, np.zeros((2, 2 * len(self.mapLUT.keys())))))
        H[0, 3 + 2 * h_ind] = cs
        H[1, 3 + 2 * h_ind] = -sn
        H[0, 3 + 2 * h_ind + 1] = sn
        H[1, 3 + 2 * h_ind + 1] = cs
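        # H is the Jacobian of compute_zt with respect to the full state; only
        # the pose columns and this landmark's two columns are nonzero.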

        K = self.Sigma @ H.T @ np.linalg.inv(H @ self.Sigma @ H.T + self.Q)
        # TODO add noise
        h = self.compute_zt(x_t, y_t, theta_t, x_m, y_m)

        self.mu = self.mu + K @ (z - h)
        self.Sigma = self.Sigma - K @ H @ self.Sigma

    # Augment the state vector to include the new landmark
    #    z:     The (x,y) position of the landmark relative to the robot
    #    i:     The ID of the observed landmark
    def augmentState(self, z, i):

        # Update mapLUT to include the new landmark
        new_ind = len(self.mapLUT.keys())
        self.mapLUT[i] = new_ind

        # Compute the absolute position of the new landmark and augment the mean vector
        position = self.computeNewLandmarkPosition(z)
        self.mu = np.vstack((self.mu, position))
        assert 2 * (new_ind + 1) + 3 == self.mu.shape[0]

        # Compute the Jacobian of the inverse measurement function
        theta = self.mu[2, 0]
        sn = np.sin(theta)
        cs = np.cos(theta)
        zx, zy = np.ravel(z)
        G = np.hstack((np.array(
            [[1., 0., -zx * sn - zy * cs], [0., 1., zx * cs - zy * sn]],
            dtype=float), np.zeros((2, 2 * new_ind), dtype=float)))

        # Augment the covariance matrix
        tl = self.Sigma
        tr = self.Sigma @ G.T
        bl = G @ self.Sigma
        br = G @ self.Sigma @ G.T + self.Q
        self.Sigma = np.hstack((np.vstack((tl, bl)), np.vstack((tr, br))))

    # Compute the new landmark position (absolute) from the current mean vector
    # and observation. This is `g`.
    def computeNewLandmarkPosition(self, z):

        z = np.reshape(z, (2, 1))
        pos = self.mu[:2]
        theta_t = self.mu[2, 0]
        sn = np.sin(theta_t)
        cs = np.cos(theta_t)

        # The inverse of a rotation matrix is its transpose
        unrotate = np.array([[cs, -sn], [sn, cs]], dtype=float)
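        # world-frame landmark position: m = R(theta_t) @ z + p_t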

        return unrotate @ z + pos

    # Runs the EKF SLAM algorithm
    #   U:        Array of control inputs, one column per time step
    #   Z:        Array of landmark observations in which each column
    #             [t; id; x; y] denotes a separate measurement and is
    #             represented by the time step (t), feature id (id),
    #             and the observed (x, y) position relative to the robot
    #   XGT:      Array of ground-truth poses (may be None)
    def run(self, U, Z, XGT=None, MGT=None):

        # Draws the ground-truth map
        if MGT is not None:
            self.vis.drawMap(MGT)
        else:
            print("No ground truth map")

        # Column index of the last observation (iterate forwards in time, but not past t)
        z_ind = 0
        # Iterate over the data
        for t in range(U.shape[1]):
            u = U[:, t]

            if self.visualize:
                self.vis.ax.plot(self.mu[0], self.mu[1], "ro")

            self.prediction(u)

            # fold in all observations time-stamped up to the current step
            _, z_max = Z.shape
            while z_ind < z_max and Z[0, z_ind] <= t:
                _, landmark_id, x, y = Z[:, z_ind]
                self.update((x, y), landmark_id)
                z_ind += 1

        # You may want to call the visualization function
        # between filter steps
        if self.visualize:
            if XGT is None:
                self.render(None)
            else:
                self.render(XGT[:, t])
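
As a usage illustration, the sketch below shows one way the Example #3 class might be driven. It is a minimal, hypothetical driver: the noise values and the synthetic U and Z arrays are illustrative assumptions, not part of the original assignment code; the array shapes follow the formats documented in run().

import numpy as np

# Hypothetical driver; the values below are illustrative assumptions
if __name__ == "__main__":
    mu0 = np.zeros((3, 1))        # initial pose [x; y; theta] as a column vector
    Sigma0 = np.eye(3) * 1e-3     # initial pose covariance
    R = np.diag([0.01, 0.005])    # process noise on [d, dtheta]
    Q = np.diag([0.05, 0.05])     # measurement noise on the relative (x, y)

    # Two control steps, one column per step: [d; dtheta]
    U = np.array([[1.0, 1.0],
                  [0.1, 0.0]])
    # One observation at t = 0 of landmark 7, seen at (2.0, 0.5) in the body frame
    Z = np.array([[0.0],
                  [7.0],
                  [2.0],
                  [0.5]])

    slam = EKFSLAM(mu0, Sigma0, R, Q, visualize=False)
    slam.run(U, Z)
    print(slam.mu.ravel())        # pose followed by the mapped landmark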