Code Example #1
File: tracking_viewer.py  Project: Zber5/OpenRadar
    def __init__(self):
        # Reference axes used to derive the camera orientation
        self.ORIGINAL_FORWARD = sl.Translation()
        self.ORIGINAL_FORWARD.init_vector(0, 0, 1)
        self.ORIGINAL_UP = sl.Translation()
        self.ORIGINAL_UP.init_vector(0, 1, 0)
        self.ORIGINAL_RIGHT = sl.Translation()
        self.ORIGINAL_RIGHT.init_vector(1, 0, 0)
        # Frustum parameters: near/far clip planes and horizontal field of view (degrees)
        self.znear = 0.5
        self.zfar = 100.
        self.horizontalFOV = 70.
        # Camera pose state, derived basis vectors, and view/projection matrices
        self.orientation_ = sl.Orientation()
        self.position_ = sl.Translation()
        self.forward_ = sl.Translation()
        self.up_ = sl.Translation()
        self.right_ = sl.Translation()
        self.vertical_ = sl.Translation()
        self.vpMatrix_ = sl.Matrix4f()
        self.projection_ = sl.Matrix4f()
        self.projection_.set_identity()
        self.setProjection(1.78)  # build the projection matrix for a ~16:9 aspect ratio

        # Initial placement: above and behind the origin, looking along -Z with +Y as the vertical
        self.position_.init_vector(0., 5., -3.)
        tmp = sl.Translation()
        tmp.init_vector(0, 0, -4)
        tmp2 = sl.Translation()
        tmp2.init_vector(0, 1, 0)
        self.setDirection(tmp, tmp2)
        cam_rot = sl.Rotation()
        cam_rot.set_euler_angles(-50., 180., 0., False)  # Euler angles in degrees (radians flag is False)
        self.setRotation(cam_rot)
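The constructor above delegates the projection matrix to setProjection, which is not shown here. As a rough illustration only, the standalone sketch below builds an OpenGL-style perspective matrix from the same parameters (horizontalFOV = 70°, znear = 0.5, zfar = 100, aspect 1.78); the perspective() helper is hypothetical and assumes the project follows the standard convention.

import math
import numpy as np

def perspective(horizontal_fov_deg=70., aspect=1.78, znear=0.5, zfar=100.):
    # Derive the vertical FOV from the horizontal FOV and aspect ratio,
    # then build a standard OpenGL-style perspective matrix.
    fov_x = math.radians(horizontal_fov_deg)
    fov_y = 2. * math.atan(math.tan(fov_x / 2.) / aspect)
    f = 1. / math.tan(fov_y / 2.)
    return np.array([
        [f / aspect, 0., 0., 0.],
        [0., f, 0., 0.],
        [0., 0., (zfar + znear) / (znear - zfar), (2. * zfar * znear) / (znear - zfar)],
        [0., 0., -1., 0.],
    ])

print(perspective())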
Code Example #2
    def update(self):
        if self.mouse_button[0]:
            # Button 0 drag: orbit the camera. Horizontal motion rotates about the
            # fixed vertical axis, vertical motion about the camera's right axis.
            r = sl.Rotation()
            vert = self.camera.vertical_
            tmp = vert.get()
            vert.init_vector(tmp[0] * 1., tmp[1] * 1., tmp[2] * 1.)
            r.init_angle_translation(self.mouseMotion[0] * 0.002, vert)
            self.camera.rotate(r)

            r.init_angle_translation(self.mouseMotion[1] * 0.002, self.camera.right_)
            self.camera.rotate(r)

        if self.mouse_button[1]:
            # Button 1 drag: pan the camera along its right and up axes.
            t = sl.Translation()
            tmp = self.camera.right_.get()
            scale = self.mouseMotion[0] * -0.01
            t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale)
            self.camera.translate(t)

            tmp = self.camera.up_.get()
            scale = self.mouseMotion[1] * 0.01
            t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale)
            self.camera.translate(t)

        if self.wheelPosition != 0:
            # Mouse wheel: dolly along the camera's forward axis.
            t = sl.Translation()
            tmp = self.camera.forward_.get()
            scale = self.wheelPosition * -0.065
            t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale)
            self.camera.translate(t)

        self.camera.update()

        # Consume the accumulated input for this frame
        self.mouseMotion = [0., 0.]
        self.wheelPosition = 0
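update() only consumes accumulated input; how mouse_button, mouseMotion and wheelPosition get filled is not shown in the snippet. Below is a minimal sketch of a GLUT-based state holder that could feed it, assuming PyOpenGL/freeglut. The MouseState class and its previous_mouse_pos field are made up for illustration; only the field names consumed by update() are taken from the code above.

from OpenGL.GLUT import GLUT_LEFT_BUTTON, GLUT_RIGHT_BUTTON, GLUT_DOWN

class MouseState:
    def __init__(self):
        self.mouse_button = [False, False]   # [orbit, pan]
        self.mouseMotion = [0., 0.]          # accumulated pixel deltas
        self.wheelPosition = 0
        self.previous_mouse_pos = [0, 0]

    def on_mouse(self, button, state, x, y):      # registered via glutMouseFunc
        if button == GLUT_LEFT_BUTTON:
            self.mouse_button[0] = (state == GLUT_DOWN)
        elif button == GLUT_RIGHT_BUTTON:
            self.mouse_button[1] = (state == GLUT_DOWN)
        self.previous_mouse_pos = [x, y]

    def on_motion(self, x, y):                    # registered via glutMotionFunc
        # Accumulate pixel deltas; update() consumes and resets them each frame
        self.mouseMotion[0] += x - self.previous_mouse_pos[0]
        self.mouseMotion[1] += y - self.previous_mouse_pos[1]
        self.previous_mouse_pos = [x, y]

    def on_wheel(self, wheel, direction, x, y):   # registered via glutMouseWheelFunc (freeglut)
        self.wheelPosition += direction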
Code Example #3
import numpy as np
import pyzed.sl as sl


def get_pos_dt(cams, index, cnt_r=6):
    """Return the pose of camera `index` (translation + quaternion, rounded to
    `cnt_r` decimals) and its 4x4 world-to-pixel projection transform."""
    zed = cams.zed_list[index]
    zed_pose = cams.pose_list[index]
    zed_sensors = cams.zed_sensors_list[index]

    # https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py
    zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
    # zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
    # zed_imu = zed_sensors.get_imu_data()

    # Translation of the camera in the world frame
    py_translation = cams.py_translation_list[index]
    py_orientation = cams.py_orientation_list[index]
    translation = zed_pose.get_translation(py_translation).get()
    tx = round(translation[0], cnt_r)
    ty = round(translation[1], cnt_r)
    tz = round(translation[2], cnt_r)

    # Orientation quaternion of the camera in the world frame
    orientation = zed_pose.get_orientation(py_orientation).get()
    ox = round(orientation[0], cnt_r)
    oy = round(orientation[1], cnt_r)
    oz = round(orientation[2], cnt_r)
    ow = round(orientation[3], cnt_r)
    pose_lst = [tx, ty, tz, ow, ox, oy, oz]

    # Build the camera transform: world-to-camera extrinsics combined with the
    # intrinsic matrix K, padded to a 4x4 matrix.
    R = zed_pose.get_rotation_matrix(sl.Rotation()).r.T / 1000  # transposed and scaled as in the original project
    t = zed_pose.get_translation(sl.Translation()).get()
    world2cam = np.hstack((R, np.dot(-R, t).reshape(3, -1)))
    K = get_camera_intrintic_info(zed)  # project helper returning the 3x3 intrinsic matrix
    P = np.dot(K, world2cam)
    transform = np.vstack((P, [0, 0, 0, 1]))
    return pose_lst, transform
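A small usage sketch for the returned values, assuming `cams` is the project's multi-camera container (not shown above): the 4x4 transform maps a homogeneous world point to pixel coordinates of camera `index`. The sample point is made up for illustration.

import numpy as np

pose, transform = get_pos_dt(cams, index=0)
point_world = np.array([0.2, 0.0, 1.5, 1.0])   # homogeneous world point, illustrative values
u, v, w, _ = transform @ point_world
pixel = (u / w, v / w)                         # perspective divide -> pixel coordinates
print("pose (tx ty tz ow ox oy oz):", pose)
print("projected pixel:", pixel)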
Code Example #4
File: viewer.py  Project: Baldins/crowd_perception
    def setDirection(self, dir, vert):
        # Point the camera along `dir`, with `vert` as the requested vertical axis.
        dir.normalize()
        tmp = dir.get()
        dir.init_vector(tmp[0] * -1., tmp[1] * -1., tmp[2] * -1.)
        # Orientation that rotates the reference forward axis onto the (negated) direction
        self.orientation_.init_translation(self.ORIGINAL_FORWARD, dir)
        self.updateVectors()
        self.vertical_ = vert
        # If the derived up vector points away from the requested vertical,
        # roll the camera by pi about the reference forward axis to flip it.
        if sl.Translation.dot_translation(self.vertical_, self.up_) < 0.:
            tmp = sl.Rotation()
            tmp.init_angle_translation(3.14, self.ORIGINAL_FORWARD)
            self.rotate(tmp)
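The final branch of setDirection is easier to see with plain vectors: when the requested vertical and the up vector derived from the new orientation point into opposite half-spaces, a pi roll about the reference forward axis flips the up vector back. A toy numpy illustration (not pyzed code), with all vectors chosen for the example:

import numpy as np

def axis_angle_matrix(axis, angle):
    # Rodrigues' formula: rotation matrix for a unit axis and an angle in radians
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0., -axis[2], axis[1]],
                  [axis[2], 0., -axis[0]],
                  [-axis[1], axis[0], 0.]])
    return np.eye(3) + np.sin(angle) * K + (1. - np.cos(angle)) * (K @ K)

forward = np.array([0., 0., 1.])      # reference forward axis (ORIGINAL_FORWARD)
up = np.array([0., -1., 0.])          # up vector derived from the orientation
vertical = np.array([0., 1., 0.])     # vertical requested by the caller
if np.dot(vertical, up) < 0.:         # same check as in setDirection
    up = axis_angle_matrix(forward, np.pi) @ up
print(np.round(up))                   # -> [0. 1. 0.]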