Example #1
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        #### TODO ####
        # compute h, Hx
        ##############

        x, y, th = self.x  # Coordinates of base frame in world frame
        xc, yc, thc = self.tf_base_to_camera  # Coordinates of camera in base frame
        # Coordinates of camera in world frame
        x_ = x + xc * cos(th) - yc * sin(th)
        y_ = y + xc * sin(th) + yc * cos(th)
        th_ = th + thc

        # Eq. 5.94
        h = np.array([alpha - th_, r - (x_ * cos(alpha) + y_ * sin(alpha))])
        Hx = np.array(
            [[0, 0, -1],
             [
                 -cos(alpha), -sin(alpha),
                 yc * cos(alpha - th_ + thc) - xc * sin(alpha - th_ + thc)
             ]])

        ##############
        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
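
Every example in this section calls a helper `normalize_line_parameters` that does not appear in the corpus itself. For reference, a minimal sketch of what it presumably does, assuming the usual convention that r is non-negative and alpha is wrapped to [-pi, pi) (the exact interval and signature details are assumptions inferred from the call sites):

import numpy as np

def normalize_line_parameters(h):
    # Sketch (assumed behavior): normalize (alpha, r) so that r >= 0 and
    # alpha lies in [-pi, pi). Returns (flipped, h); flipped=True means the
    # line normal was reversed, which is why callers negate the second row of Hx.
    alpha, r = h
    flipped = False
    if r < 0:
        # (alpha, r) and (alpha + pi, -r) parameterize the same line
        alpha += np.pi
        r = -r
        flipped = True
    alpha = (alpha + np.pi) % (2 * np.pi) - np.pi  # wrap to [-pi, pi)
    return flipped, np.array([alpha, r])
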
Example #2
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        # compute h, Hx
        x, y, theta = self.x
        x_cam, y_cam, theta_cam = self.tf_base_to_camera  # camera location in robot frame
        # compute camera location in world frame
        T_bot_to_w = np.array([[np.cos(theta), -np.sin(theta), x],
                               [np.sin(theta), np.cos(theta), y],
                               [0., 0., 1.]])
        x_cam_w, y_cam_w, _ = T_bot_to_w.dot(np.array([x_cam, y_cam, 1.]))

        h = np.array([
            alpha - theta - theta_cam,
            r - x_cam_w * np.cos(alpha) - y_cam_w * np.sin(alpha)
        ])

        Hx = np.array([
            [0., 0., -1.],
            [
                -np.cos(alpha), -np.sin(alpha),
                (y_cam * np.cos(alpha) - x_cam * np.sin(alpha)) * np.cos(theta) +
                (x_cam * np.cos(alpha) + y_cam * np.sin(alpha)) * np.sin(theta)
            ]
        ])

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #3
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        # find the pose of the robot in the world frame
        x, y, th = self.x
        # find the pose of the camera in the robot frame
        x_cam, y_cam, th_cam = self.tf_base_to_camera

        # compute the line parameters in the camera frame
        h = np.array([
            alpha - th - th_cam,
            r - x * cos(alpha) - y * sin(alpha) -
            x_cam * cos(alpha - th) - y_cam * sin(alpha - th)
        ])

        # compute the Jacobian of h with respect to the belief mean
        Hx = np.array([[0, 0, -1],
                       [
                           -cos(alpha), -sin(alpha),
                           -x_cam * sin(alpha - th) + y_cam * cos(alpha - th)
                       ]])

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #4
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m
        
        x, y, th = self.x
        xb2c, yb2c, thb2c = self.tf_base_to_camera

        # position of camera coordinate system in world frame
        xcam = x + xb2c*cos(th) - yb2c*sin(th)
        ycam = y + xb2c*sin(th) + yb2c*cos(th)

        rcam = sqrt(xcam**2 + ycam**2)
        acam = math.atan2(ycam, xcam)

        # distance from camera to line m
        rprime = r - rcam*cos(acam-alpha)

        # angle from camera frame to line m
        aprime = alpha - th - thb2c

        h = np.array([aprime, rprime])

        dh2dx  = -cos(alpha)
        dh2dy  = -sin(alpha)
        dh2dth = (cos(alpha)*(xb2c*sin(th)+yb2c*cos(th)) -
                  sin(alpha)*(xb2c*cos(th)-yb2c*sin(th)))

        Hx = np.array([[0, 0, -1],
                       [dh2dx, dh2dy, dh2dth]])

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1,:] = -Hx[1,:]

        return h, Hx
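
Examples #4 (above) and #13 (below) write the range correction in polar form, rprime = r - rcam*cos(acam - alpha), where (rcam, acam) are the polar coordinates of the camera in the world frame. This is the same quantity as the Cartesian form used in the other examples: since xcam = rcam*cos(acam) and ycam = rcam*sin(acam), the angle-difference identity gives

    rcam*cos(acam - alpha) = rcam*cos(acam)*cos(alpha) + rcam*sin(acam)*sin(alpha)
                           = xcam*cos(alpha) + ycam*sin(alpha)
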
Example #5
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3+2*j):(3+2*j+2)]    # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        #### TODO ####
        # compute h, Hx (you may find the skeleton for computing Hx below useful)

        x,   y,   th   = self.x[:3]
        x_c, y_c, th_c = self.tf_base_to_camera

        Hx = np.zeros((2,self.x.size))

        h, Hx[:,:3] = generic_map_line_to_predicted_measurement(alpha, r, self.x[:3], self.tf_base_to_camera)
        # First two map lines are assumed fixed so we don't want to propagate any measurement correction to them
        if j > 1:
            Hx[0, 3+2*j] = 1
            Hx[1, 3+2*j] = x*np.sin(alpha) - y*np.cos(alpha) + x_c*np.sin(alpha-th) - y_c*np.cos(alpha-th)
            # equivalently, in terms of the camera position in the world frame:
            # Hx[1, 3+2*j] = np.sin(alpha) * (x + x_c*np.cos(th) - y_c*np.sin(th)) - \
            #                np.cos(alpha) * (y + x_c*np.sin(th) + y_c*np.cos(th))
            Hx[0, 3+2*j+1] = 0
            Hx[1, 3+2*j+1] = 1

        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1,:] = -Hx[1,:]

        return h, Hx
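
Example #5 factors the pose-dependent computation into `generic_map_line_to_predicted_measurement`, which is also not included in this corpus. A hypothetical reconstruction consistent with the inline versions in Examples #1-#4 (the signature and exact form are assumptions inferred from the call site):

import numpy as np

def generic_map_line_to_predicted_measurement(alpha, r, x_base, tf_base_to_camera):
    # Hypothetical reconstruction: predicted line parameters (alpha, r) in the
    # camera frame, plus the 2x3 Jacobian w.r.t. the robot pose (x, y, th).
    x, y, th = x_base
    x_c, y_c, th_c = tf_base_to_camera
    # camera position in the world frame
    x_w = x + x_c * np.cos(th) - y_c * np.sin(th)
    y_w = y + x_c * np.sin(th) + y_c * np.cos(th)
    h = np.array([alpha - th - th_c,
                  r - x_w * np.cos(alpha) - y_w * np.sin(alpha)])
    Hx = np.array([[0., 0., -1.],
                   [-np.cos(alpha), -np.sin(alpha),
                    y_c * np.cos(alpha - th) - x_c * np.sin(alpha - th)]])
    return h, Hx
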
Example #6
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3 + 2 * j):(3 + 2 * j + 2)]  # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        # Use mean expected position to compute line location
        x, y, theta = self.x[0:3]
        # (x, y, theta) transform from the robot base to the camera frame
        xcam, ycam, theta_cam = self.tf_base_to_camera

        alpha_cam = alpha - theta_cam - theta

        # rotation matrix from the robot base frame to the world frame
        R_cam = np.array([[np.cos(theta), -np.sin(theta), 0.],
                          [np.sin(theta), np.cos(theta), 0.],
                          [0., 0., 1.]])
        # rotation matrix from the world frame to the line reference frame
        R_line = np.array([[np.cos(-alpha), -np.sin(-alpha)],
                           [np.sin(-alpha), np.cos(-alpha)]])

        # rotate the camera offset into the world frame...
        cam_world_coords = R_cam.dot(self.tf_base_to_camera)
        # ...then translate to get the camera position in the world frame
        cam_w = np.array([x + cam_world_coords[0], y + cam_world_coords[1]])

        # project the camera position onto the line normal
        r_proj = R_line.dot(cam_w)
        r_cam = r - r_proj[0]

        # Store results in h array
        h = np.array([alpha_cam, r_cam])

        Hx = np.zeros((2, self.x.size))

        Hx[0, 2] = -1
        Hx[1, 0] = -np.cos(alpha)
        Hx[1, 1] = -np.sin(alpha)
        Hx[1, 2] = -(np.cos(alpha) *
                     (-np.sin(theta) * xcam - np.cos(theta) * ycam) +
                     np.sin(alpha) *
                     (np.cos(theta) * xcam - np.sin(theta) * ycam))

        # First two map lines are assumed fixed so we don't want to propagate
        # any measurement correction to them
        if j > 1:
            Hx[0, 3 + 2 * j] = 1
            Hx[1, 3 + 2 * j] = (
                np.sin(alpha) * (x + np.cos(theta) * xcam - np.sin(theta) * ycam) -
                np.cos(alpha) * (y + np.sin(theta) * xcam + np.cos(theta) * ycam))
            Hx[0, 3 + 2 * j + 1] = 0
            Hx[1, 3 + 2 * j + 1] = 1

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #7
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        #### TODO ####
        # compute h, Hx
        x, y, th = self.x
        x_cam, y_cam, th_cam = self.tf_base_to_camera
        # camera position in the world frame
        x_cam_w = x + x_cam * np.cos(th) - y_cam * np.sin(th)
        y_cam_w = y + x_cam * np.sin(th) + y_cam * np.cos(th)
        h = np.array([alpha - th - th_cam,
                      r - x_cam_w * np.cos(alpha) - y_cam_w * np.sin(alpha)])
        Hx = np.array([[0., 0., -1.],
                       [-np.cos(alpha), -np.sin(alpha),
                        y_cam * np.cos(alpha - th) - x_cam * np.sin(alpha - th)]])
        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #8
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3 + 2 * j):(3 + 2 * j + 2)]  # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        # find the pose of the robot in the world frame
        x, y, th = self.x[:3]
        # find the pose of the camera in the robot frame
        x_cam, y_cam, th_cam = self.tf_base_to_camera

        # compute the line parameters in the camera frame
        h = np.array([
            alpha - th - th_cam,
            r - x * cos(alpha) - y * sin(alpha) -
            x_cam * cos(alpha - th) - y_cam * sin(alpha - th)
        ])

        # compute the Jacobian of h with respect to the belief mean
        Hx = np.zeros((2, self.x.size))
        Hx[:, :3] = np.array(
            [[0, 0, -1],
             [
                 -cos(alpha), -sin(alpha),
                 -x_cam * sin(alpha - th) + y_cam * cos(alpha - th)
             ]])
        # First two map lines are assumed fixed so we don't want to propagate any measurement correction to them
        if j > 1:
            Hx[0, 3 + 2 * j] = 1
            Hx[1, 3 + 2 * j] = (x * sin(alpha) - y * cos(alpha) +
                                x_cam * sin(alpha - th) - y_cam * cos(alpha - th))
            Hx[0, 3 + 2 * j + 1] = 0
            Hx[1, 3 + 2 * j + 1] = 1

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #9
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        #### TODO ####
        # compute h, Hx
        h, Hx = map_line_to_predicted_measurement_EKF(alpha, r, self.x,
                                                      self.tf_base_to_camera)
        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
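
The helper `map_line_to_predicted_measurement_EKF` is likewise external to this corpus; given its arguments and return values, it presumably performs the same computation sketched for `generic_map_line_to_predicted_measurement` after Example #5.
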
Example #10
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        # Use mean expected position to compute line location
        x, y, theta = self.x
        # (x, y, theta) transform from the robot base to the camera frame
        xcam, ycam, theta_cam = self.tf_base_to_camera

        alpha_cam = alpha - theta_cam - theta

        # rotation matrix from the robot base frame to the world frame
        R_cam = np.array([[np.cos(theta), -np.sin(theta), 0.],
                          [np.sin(theta), np.cos(theta), 0.],
                          [0., 0., 1.]])
        # rotation matrix from the world frame to the line reference frame
        R_line = np.array([[np.cos(-alpha), -np.sin(-alpha)],
                           [np.sin(-alpha), np.cos(-alpha)]])

        # rotate the camera offset into the world frame...
        cam_world_coords = R_cam.dot(self.tf_base_to_camera)
        # ...then translate to get the camera position in the world frame
        cam_w = np.array([x + cam_world_coords[0], y + cam_world_coords[1]])

        # project the camera position onto the line normal
        r_proj = R_line.dot(cam_w)
        r_cam = r - r_proj[0]

        # Store results in h array
        h = np.array([alpha_cam, r_cam])

        # Create and populate Hx matrix
        Hx = np.zeros((2, 3))
        Hx[0, 2] = -1
        Hx[1, 0] = -np.cos(alpha)
        Hx[1, 1] = -np.sin(alpha)
        Hx[1, 2] = -(np.cos(alpha) *
                     (-np.sin(theta) * xcam - np.cos(theta) * ycam) +
                     np.sin(alpha) *
                     (np.cos(theta) * xcam - np.sin(theta) * ycam))

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #11
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3 + 2 * j):(3 + 2 * j + 2)]  # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        #### TODO ####
        # compute h, Hx (you may find the skeleton for computing Hx below useful)
        x, y, theta = self.x[:3]
        x_cam, y_cam, theta_cam = self.tf_base_to_camera  # camera location in robot frame

        # compute camera location in world frame
        T_bot_to_w = np.array([[np.cos(theta), -np.sin(theta), x],
                               [np.sin(theta), np.cos(theta), y],
                               [0., 0., 1.]])
        x_cam_w, y_cam_w, _ = T_bot_to_w.dot(np.array([x_cam, y_cam, 1.]))
        # compute h, Hx
        h = np.array([
            alpha - theta - theta_cam,
            r - x_cam_w * np.cos(alpha) - y_cam_w * np.sin(alpha)
        ])
        Hx = np.zeros((2, self.x.size))
        Hx[:, :3] = np.array([
            [0., 0., -1.],
            [
                -np.cos(alpha), -np.sin(alpha),
                (y_cam * np.cos(alpha) - x_cam * np.sin(alpha)) * np.cos(theta) +
                (x_cam * np.cos(alpha) + y_cam * np.sin(alpha)) * np.sin(theta)
            ]
        ])
        # First two map lines are assumed fixed so we don't want to propagate any measurement correction to them
        if j > 1:
            Hx[0, 3 + 2 * j] = 1.0
            Hx[1, 3 + 2 * j] = x_cam_w * np.sin(alpha) - y_cam_w * np.cos(alpha)
            Hx[0, 3 + 2 * j + 1] = 0.0
            Hx[1, 3 + 2 * j + 1] = 1.0

        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #12
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3 + 2 * j):(3 + 2 * j + 2)]  # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        #### TODO ####
        # compute h, Hx (you may find the skeleton for computing Hx below useful)

        x, y, th = self.x[:3]  # Coordinates of base frame in world frame
        xc, yc, thc = self.tf_base_to_camera  # Coordinates of camera in base frame
        # Coordinates of camera in world frame
        x_ = x + xc * cos(th) - yc * sin(th)
        y_ = y + xc * sin(th) + yc * cos(th)
        th_ = th + thc

        # Eq. 5.94
        h = np.array([alpha - th_, r - (x_ * cos(alpha) + y_ * sin(alpha))])

        Hx = np.zeros((2, self.x.size))
        Hx[:, :3] = np.array(
            [[0, 0, -1],
             [
                 -cos(alpha), -sin(alpha),
                 yc * cos(alpha - th_ + thc) - xc * sin(alpha - th_ + thc)
             ]])
        # First two map lines are assumed fixed so we don't want to propagate any measurement correction to them
        if j > 1:
            Hx[0, 3 + 2 * j] = 1
            Hx[1, 3 + 2 * j] = x_ * sin(alpha) - y_ * cos(alpha)
            Hx[0, 3 + 2 * j + 1] = 0
            Hx[1, 3 + 2 * j + 1] = 1

        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #13
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3+2*j):(3+2*j+2)]    # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        x, y, th = self.x[:3]
        xb2c, yb2c, thb2c = self.tf_base_to_camera

        # position of camera coordinate system in world frame (taken from Q1)
        xcam = x + xb2c*cos(th) - yb2c*sin(th)
        ycam = y + xb2c*sin(th) + yb2c*cos(th)
        rcam = sqrt(xcam**2 + ycam**2)
        acam = math.atan2(ycam, xcam)

        aprime = alpha - th - thb2c
        rprime = r - rcam*cos(acam-alpha)
        h = np.array([aprime, rprime])

        # also taken from Q1
        dh2dx  = -cos(alpha)
        dh2dy  = -sin(alpha)
        dh2dth = (cos(alpha)*(xb2c*sin(th)+yb2c*cos(th)) -
                  sin(alpha)*(xb2c*cos(th)-yb2c*sin(th)))
        
        Hx = np.zeros((2,self.x.size))
        Hx[:,:3] = np.array([[0, 0, -1],
                             [dh2dx, dh2dy, dh2dth]])

        # First two map lines are assumed fixed so we don't want to propagate any measurement correction to them
        if j > 1:
            Hx[0, 3+2*j] = 1
            Hx[1, 3+2*j] = -rcam*sin(acam-alpha)
            Hx[0, 3+2*j+1] = 0
            Hx[1, 3+2*j+1] = 1

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1,:] = -Hx[1,:]

        return h, Hx
Example #14
    def map_line_to_predicted_measurement(self, j):
        alpha, r = self.x[(3 + 2 * j):(3 + 2 * j + 2)]  # j is zero-indexed! (yeah yeah I know this doesn't match the pset writeup)

        #### TODO ####
        # compute h, Hx
        x, y, th = self.x[:3]
        x_cam, y_cam, th_cam = self.tf_base_to_camera

        # line parameters in the camera frame
        h = np.array([alpha - th - th_cam,
                      r - x * np.cos(alpha) - y * np.sin(alpha) -
                      x_cam * np.cos(alpha - th) - y_cam * np.sin(alpha - th)])

        Hx = np.zeros((2, self.x.size))
        Hx[:, :3] = np.array([[0., 0., -1.],
                              [-np.cos(alpha), -np.sin(alpha),
                               y_cam * np.cos(alpha - th) - x_cam * np.sin(alpha - th)]])
        # First two map lines are assumed fixed so we don't want to propagate any measurement correction to them
        if j > 1:
            Hx[0, 3 + 2 * j] = 1
            Hx[1, 3 + 2 * j] = (x * np.sin(alpha) - y * np.cos(alpha) +
                                x_cam * np.sin(alpha - th) - y_cam * np.cos(alpha - th))
            Hx[0, 3 + 2 * j + 1] = 0
            Hx[1, 3 + 2 * j + 1] = 1

        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
Example #15
    def map_line_to_predicted_measurement(self, m):
        alpha, r = m

        #### TODO ####
        # compute h, Hx
        # First let's find our camera's location in the robot frame
        x_camera, y_camera, theta_camera = self.tf_base_to_camera
        x, y, theta = self.x

        # Now, we will find our camera's location in the world frame
        # Let's first find the transformation matrix
        Transformation_robot_to_world = np.array(
            [[np.cos(theta), -np.sin(theta), x],
             [np.sin(theta), np.cos(theta), y],
             [0., 0., 1.]])
        x_camera_world, y_camera_world, _ = Transformation_robot_to_world.dot(
            np.array([x_camera, y_camera, 1.]))
        h = np.array([
            alpha - theta - theta_camera,
            r - x_camera_world * np.cos(alpha) - y_camera_world * np.sin(alpha)
        ])
        Hx = np.array(
            [[0., 0., -1.],
             [
                 -np.cos(alpha), -np.sin(alpha),
                 (y_camera * np.cos(alpha) - x_camera * np.sin(alpha)) * np.cos(theta) +
                 (x_camera * np.cos(alpha) + y_camera * np.sin(alpha)) * np.sin(theta)
             ]])
        ##############

        flipped, h = normalize_line_parameters(h)
        if flipped:
            Hx[1, :] = -Hx[1, :]

        return h, Hx
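
A quick way to sanity-check any of these Jacobians is to compare Hx against a central finite-difference approximation of h. A self-contained sketch using Example #2's formulation; the pose, camera offset, and line values below are arbitrary test inputs, not values from the pset:

import numpy as np

def predict_h(state, tf_base_to_camera, m):
    # predicted (alpha, r) in the camera frame, as in Example #2
    alpha, r = m
    x, y, theta = state
    x_cam, y_cam, theta_cam = tf_base_to_camera
    x_cam_w = x + x_cam * np.cos(theta) - y_cam * np.sin(theta)
    y_cam_w = y + x_cam * np.sin(theta) + y_cam * np.cos(theta)
    return np.array([alpha - theta - theta_cam,
                     r - x_cam_w * np.cos(alpha) - y_cam_w * np.sin(alpha)])

state = np.array([0.5, -0.2, 0.3])   # arbitrary robot pose (x, y, theta)
tf = np.array([0.1, 0.05, 0.02])     # arbitrary camera offset in the base frame
m = (0.8, 2.0)                       # arbitrary line parameters (alpha, r)

alpha = m[0]
x_cam, y_cam, _ = tf
theta = state[2]
# analytic Jacobian from Example #2
Hx = np.array([
    [0., 0., -1.],
    [-np.cos(alpha), -np.sin(alpha),
     (y_cam * np.cos(alpha) - x_cam * np.sin(alpha)) * np.cos(theta) +
     (x_cam * np.cos(alpha) + y_cam * np.sin(alpha)) * np.sin(theta)]])

# central finite differences over the robot pose
eps = 1e-6
Hx_fd = np.zeros((2, 3))
for i in range(3):
    d = np.zeros(3)
    d[i] = eps
    Hx_fd[:, i] = (predict_h(state + d, tf, m) - predict_h(state - d, tf, m)) / (2 * eps)

assert np.allclose(Hx, Hx_fd, atol=1e-5), (Hx, Hx_fd)
print("analytic and finite-difference Jacobians agree")
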