Example #1
    def chessboard_size(self, lcorners, rcorners, board):
        """
        Compute the square edge length from two sets of matching undistorted points
        given the current calibration.
        """
        # Project the points to 3d
        cam = image_geometry.StereoCameraModel()
        cam.fromCameraInfo(*self.as_message())
        disparities = [(x0 - x1)
                       for ((x0, y0), (x1, y1)) in zip(lcorners, rcorners)]
        pt3d = [
            cam.projectPixelTo3d((x, y), d)
            for ((x, y), d) in zip(lcorners, disparities)
        ]

        def l2(p0, p1):
            return math.sqrt(sum([(c0 - c1)**2 for (c0, c1) in zip(p0, p1)]))

        # Compute the length from each horizontal and vertical line, and return the mean
        cc = board.n_cols
        cr = board.n_rows
        lengths = ([
            l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1)
            for r in range(cr)
        ] + [
            l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1)
            for c in range(cc)
        ])
        return sum(lengths) / len(lengths)
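A minimal sketch of the same call pattern, separate from the example above: every numeric value is invented for illustration, and only the fromCameraInfo / projectPixelTo3d usage mirrors the method.

import image_geometry
from sensor_msgs.msg import CameraInfo

left, right = CameraInfo(), CameraInfo()
left.width = right.width = 640
left.height = right.height = 480
fx = fy = 500.0
cx, cy = 320.0, 240.0
baseline = 0.07  # metres between the rectified cameras (assumed)
for info in (left, right):
    info.distortion_model = 'plumb_bob'
    info.D = [0.0] * 5
    info.K = [fx, 0.0, cx, 0.0, fy, cy, 0.0, 0.0, 1.0]
    info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
left.P = [fx, 0.0, cx, 0.0, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0, 0.0]
right.P = [fx, 0.0, cx, -fx * baseline, 0.0, fy, cy, 0.0, 0.0, 0.0, 1.0, 0.0]

cam = image_geometry.StereoCameraModel()
cam.fromCameraInfo(left, right)
# 10 px of disparity at the principal point: z = fx * baseline / d = 3.5 m
print(cam.projectPixelTo3d((320.0, 240.0), 10.0))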
Example #2
    def handle_stereo(self, msg):

        (lmsg, lcmsg, rmsg, rcmsg) = msg
        lgray = self.mkgray(lmsg)
        rgray = self.mkgray(rmsg)

        sc = StereoCalibrator([self.board])

        L = self.image_corners(lgray)
        R = self.image_corners(rgray)
        scm = image_geometry.StereoCameraModel()
        scm.fromCameraInfo(lcmsg, rcmsg)
        if L and R:
            d = [(y0 - y1) for ((_, y0), (_, y1)) in zip(L, R)]
            epipolar = math.sqrt(sum([i**2 for i in d]) / len(d))

            disparities = [(x0 - x1) for ((x0, y0), (x1, y1)) in zip(L, R)]
            pt3d = [scm.projectPixelTo3d((x, y), d) for ((x, y), d) in zip(L, disparities)]
            def l2(p0, p1):
                return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)]))

            # Compute the length from each horizontal and vertical line, and return the mean
            cc = self.board.n_cols
            cr = self.board.n_rows
            lengths = (
                [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] +
                [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)])
            dimension = sum(lengths) / len(lengths)

            print "epipolar error: %f pixels   dimension: %f m" % (epipolar, dimension)
        else:
            print "no chessboard"
Example #3
    def convert_stereo(self, cp1, cp2):
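        # In a rectified ROS stereo pair the left CameraInfo has P[3] == 0,
        # while the right one carries P[3] == -fx * baseline, so this test
        # identifies which message belongs to the left camera.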
        if cp1.camera_info.P[3] == 0:
            left = cp1
            right = cp2
        else:
            left = cp2
            right = cp1
        u = left.x
        v = left.y
        disparity = left.x - right.x

        stereo_model = image_geometry.StereoCameraModel()
        stereo_model.fromCameraInfo(left.camera_info, right.camera_info)
        (x, y, z) = stereo_model.projectPixelTo3d((u, v), disparity)
        camera_pt = PointStamped()
        camera_pt.header.frame_id = left.camera_info.header.frame_id
        camera_pt.header.stamp = rospy.Time.now()
        camera_pt.point.x = x
        camera_pt.point.y = y
        camera_pt.point.z = z
        self.listener.waitForTransform(self.output_frame,
                                       camera_pt.header.frame_id,
                                       camera_pt.header.stamp, rospy.Duration(4.0))
        output_point = self.listener.transformPoint(self.output_frame,
                                                    camera_pt)

        return output_point
Example #4
    def chessboard_size(self, lcorners, rcorners, board, msg=None):
        """
        Compute the square edge length from two sets of matching undistorted points
        given the current calibration.
        :param msg: a tuple of (left_msg, right_msg)
        """
        # Project the points to 3d
        cam = image_geometry.StereoCameraModel()
        if msg is None:
            msg = self.as_message()
        cam.fromCameraInfo(*msg)
        disparities = lcorners[:, :, 0] - rcorners[:, :, 0]
        pt3d = [
            cam.projectPixelTo3d((lcorners[i, 0, 0], lcorners[i, 0, 1]),
                                 disparities[i, 0])
            for i in range(lcorners.shape[0])
        ]  # triangulated 3-D corner points

        def l2(p0, p1):
            return math.sqrt(sum([(c0 - c1)**2 for (c0, c1) in zip(p0, p1)]))

        # Compute the length from each horizontal and vertical line, and return the mean
        cc = board.n_cols
        cr = board.n_rows
        lengths = ([
            l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1)
            for r in range(cr)
        ] + [
            l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1)
            for c in range(cc)
        ])
        return sum(lengths) / len(lengths)
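Unlike example #1, this variant takes OpenCV-style corner arrays of shape (N, 1, 2), as returned by cv2.findChessboardCorners; a quick check of the disparity slicing with invented values:

import numpy as np

lcorners = np.array([[[320.0, 240.0]], [[330.0, 240.0]]], dtype=np.float32)
rcorners = np.array([[[310.0, 240.0]], [[321.0, 240.0]]], dtype=np.float32)
print(lcorners[:, :, 0] - rcorners[:, :, 0])  # per-corner disparities, shape (2, 1)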
Example #5
 def convertStereo(self, u, v, disparity):
     """ 
     Converts two pixel coordinates u and v along with the disparity to give PointStamped 
     """
     stereoModel = image_geometry.StereoCameraModel()
     stereoModel.fromCameraInfo(self.info['left'], self.info['right'])
     (x, y, z) = stereoModel.projectPixelTo3d((u, v), disparity)
     return np.array((x, y, z))
Example #6
def projectToPixel(pt, info=None):
    x, y, z = pt
    if info is None:
        with open("camera_data/camera_info.p", "rb") as f:
            info = pickle.load(f)
    stereoModel = image_geometry.StereoCameraModel()
    stereoModel.fromCameraInfo(info['l'], info['r'])
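    # project3dToPixel maps one 3-D point to a pixel in each rectified image,
    # returning ((lu, lv), (ru, rv)).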
    left, right = stereoModel.project3dToPixel((x, y, z))
    return (left, right)
Example #7
    def handle_stereo(self, qq):

        (lmsg, lcmsg, rmsg, rcmsg) = qq
        limg = CvBridge().imgmsg_to_cv(lmsg, "mono8")
        rimg = CvBridge().imgmsg_to_cv(rmsg, "mono8")

        if 0:
            cv.SaveImage("/tmp/l%06d.png" % self.i, limg)
            cv.SaveImage("/tmp/r%06d.png" % self.i, rimg)
            self.i += 1

        scm = image_geometry.StereoCameraModel()
        scm.fromCameraInfo(lcmsg, rcmsg)

        bm = cv.CreateStereoBMState()
        if "wide" in rospy.resolve_name("stereo"):
            bm.numberOfDisparities = 160
        if 0:
            disparity = cv.CreateMat(limg.rows, limg.cols, cv.CV_16SC1)
            started = time.time()
            cv.FindStereoCorrespondenceBM(limg, rimg, disparity, bm)
            print(time.time() - started)
            ok = cv.CreateMat(limg.rows, limg.cols, cv.CV_8UC1)
            cv.CmpS(disparity, 0, ok, cv.CV_CMP_GT)
            cv.ShowImage("limg", limg)
            cv.ShowImage("disp", ok)
            cv.WaitKey(6)
        self.track(CvBridge().imgmsg_to_cv(lmsg, "rgb8"))
        if len(self.tracking) == 0:
            print "No markers found"
        for code, corners in self.tracking.items():
            corners3d = []
            for (x, y) in corners:
                limr = cv.GetSubRect(limg, (0, y - bm.SADWindowSize / 2, limg.cols, bm.SADWindowSize + 1))
                rimr = cv.GetSubRect(rimg, (0, y - bm.SADWindowSize / 2, rimg.cols, bm.SADWindowSize + 1))
                tiny_disparity = cv.CreateMat(limr.rows, limg.cols, cv.CV_16SC1)
                cv.FindStereoCorrespondenceBM(limr, rimr, tiny_disparity, bm)
                if tiny_disparity[7, x] < 0:
                    return
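                # The block matcher produces fixed-point CV_16S disparities
                # with four fractional bits, hence the division by 16 below.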
                corners3d.append(scm.projectPixelTo3d((x, y), tiny_disparity[7, x] / 16.))
                if 0:
                    cv.ShowImage("d", disparity)
            (a, b, c, d) = [numpy.array(pt) for pt in corners3d]
            def normal(s, t):
                return (t - s) / numpy.linalg.norm(t - s)
            x = PyKDL.Vector(*normal(a, b))
            y = PyKDL.Vector(*normal(a, d))
            f = PyKDL.Frame(PyKDL.Rotation(x, y, x * y), PyKDL.Vector(*a))
            msg = pm.toMsg(f)
            # print "%10f %10f %10f" % (msg.position.x, msg.position.y, msg.position.z)
            print(code, msg.position.x, msg.position.y, msg.position.z)
            self.broadcast(lmsg.header, code, msg)
Example #8
def convertStereo(u, v, disparity, info):
    """
    Converts two pixel coordinates u and v along with the disparity to give PointStamped       
    """
    stereoModel = image_geometry.StereoCameraModel()
    stereoModel.fromCameraInfo(info['l'], info['r'])
    (x, y, z) = stereoModel.projectPixelTo3d((u, v), disparity)

    cameraPoint = PointStamped()
    cameraPoint.header.frame_id = info['l'].header.frame_id
    cameraPoint.header.stamp = rospy.Time.now()  # header.stamp must be a rospy.Time, not a float
    cameraPoint.point = Point(x, y, z)
    return cameraPoint
Example #9
    def __init__(self, data_path="~/data/"):
        '''
		self.cam_info = rospy.wait_for_message("/kinect2/sd/camera_info", CameraInfo, timeout=None)
		self.cam_model = image_geometry.PinholeCameraModel()
		self.cam_model.fromCameraInfo(self.cam_info)
		'''
        self.data_path = data_path
        self.rgb_cam_info = rospy.wait_for_message("/rgb_to_depth/camera_info",
                                                   CameraInfo,
                                                   timeout=None)
        self.depth_cam_info = rospy.wait_for_message("/depth/camera_info",
                                                     CameraInfo,
                                                     timeout=None)
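        # Pair the depth and RGB intrinsics in a single StereoCameraModel,
        # treating depth as the 'left' camera and RGB as the 'right'.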
        self.cam_model = image_geometry.StereoCameraModel()
        self.cam_model.fromCameraInfo(self.depth_cam_info, self.rgb_cam_info)
Example #10
def convertStereo(u, v, disparity, info=None):
    """
    Converts two pixel coordinates u and v along with the disparity to give PointStamped
    """
    stereoModel = image_geometry.StereoCameraModel()
    if info is None:
        with open("camera_data/camera_info.p", "rb") as f:
            info = pickle.load(f)
    stereoModel.fromCameraInfo(info['l'], info['r'])
    (x, y, z) = stereoModel.projectPixelTo3d((u, v), disparity)

    cameraPoint = PointStamped()
    cameraPoint.header.frame_id = info['l'].header.frame_id
    cameraPoint.header.stamp = rospy.Time.now()
    cameraPoint.point = Point(x, y, z)
    return cameraPoint
Example #11
    def imageCallback(self, data):
        if self.camera is None and self.leftInfo is not None and self.rightInfo is not None:
            self.camera = image_geometry.StereoCameraModel()
            self.camera.fromCameraInfo(self.leftInfo, self.rightInfo)
        elif self.camera is None:
            return

        # print 'Time between image calls:', time.time() - self.imageTime
        # startTime = time.time()

        try:
            image = self.bridge.imgmsg_to_cv(data.image)
            image = np.asarray(image[:,:])
        except CvBridgeError as e:
            print(e)
            return
Example #12
 def __init__(self):
     self.image_pub = rospy.Publisher("/tb3_0/camera/rgb/image_opencv",
                                      Image,
                                      queue_size=10)
     self.evader_position_pub = rospy.Publisher("/evader_position",
                                                PoseStamped,
                                                queue_size=10)
     self.bridge = CvBridge()
     self.image_sub = rospy.Subscriber("/tb3_0/rrbot/camera1/image_raw",
                                       Image, self.callback)
     self.depth_sub = rospy.Subscriber("/kinect_camera_bot/depth/image_raw",
                                       Image, self.depth_callback)
     self.hog = cv2.HOGDescriptor()
     self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
     self.camera_model = img_geo.StereoCameraModel()
     IPython.embed()
Example #13
def get_points_3d(left_points, right_points, info):
    """ 
    Assumes that corresponding (2-D pixel) points are ordered correctly 
    in `left_points` and `right_points`. Returns a numpy array of 3D camera points.
    https://github.com/BerkeleyAutomation/endoscope_calibration/blob/master/rigid_transformation.py
    """
    stereoModel = image_geometry.StereoCameraModel()
    stereoModel.fromCameraInfo(info['l'], info['r'])
    points_3d = []

    for i in range(len(left_points)):
        a = left_points[i]
        b = right_points[i]
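        # For a rectified pair the rows of matched points agree, so this
        # Euclidean distance reduces to the horizontal disparity x_l - x_r.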
        disparity = np.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)
        assert disparity == np.linalg.norm(np.array(a) - np.array(b))
        (x, y, z) = stereoModel.projectPixelTo3d((a[0], a[1]), disparity)
        points_3d.append([x, y, z])

    return np.array(points_3d)
Example #14
def left_pixel_to_robot_prediction(left, params, true_points):
    """ Similar to the method in `click_and_crop.py` except that we have a full list of
    points from the left camera.
    """
    pred_points = []

    for left_pt in left:
        leftx, lefty = left_pt
        left_pt_hom = np.array([leftx, lefty, 1.])
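        # Predict the matching right-image pixel by pushing the homogeneous
        # left pixel through the learned left-to-right map (the shape of
        # params['theta_l2r'] is assumed compatible with this dot product).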
        right_pt = left_pt_hom.dot(params['theta_l2r'])

        # Copy the code I wrote to convert these pts to camera points.
        disparity = np.linalg.norm(left_pt - right_pt)
        stereoModel = image_geometry.StereoCameraModel()
        stereoModel.fromCameraInfo(C_LEFT_INFO, C_RIGHT_INFO)
        (xx, yy, zz) = stereoModel.projectPixelTo3d((leftx, lefty), disparity)
        camera_pt = np.array([xx, yy, zz])

        # Now I can apply the rigid body and RF (if desired).
        camera_pt = np.concatenate((camera_pt, np.ones(1)))
        robot_pt = (params['RB_matrix']).dot(camera_pt)
        target = [robot_pt[0], robot_pt[1], robot_pt[2]]
        pred_points.append(target)

    pred_points = np.array(pred_points)

    print(
        "\nAssuming we ONLY have the left pixels, we still predict the robot points."
    )
    print("pred points:\n{}".format(pred_points))
    if true_points is not None:
        abs_errors = np.abs(pred_points - true_points)
        abs_mean_errors = np.mean(abs_errors, axis=0)
        print("true points:\n{}".format(true_points))
        print("mean abs err: {}".format(abs_mean_errors))
        print("mean err: {}".format(np.mean(pred_points - true_points,
                                            axis=0)))
    print("Done w/debugging.\n")
Example #15
    def execute(self, goal):
        self.targetType = goal.objectType
        
        self.running = True
        self.ok = True
        
        if self.leftMsg is not None and self.rightMsg is not None:
            self.stereoModel = image_geometry.StereoCameraModel()
            self.stereoModel.fromCameraInfo(self.leftMsg, self.rightMsg)

        feedback = TrackObjectFeedback()
        while self.running:
            if self.server.is_preempt_requested() or self.server.is_new_goal_available():
                self.running = False
                continue
            
            if self.targetType == TrackObjectGoal.navbar:
                #Process navbar stuff
                self.feedback = self.navBarFinder.process(self.downImage, self.downModel)
                self.server.publish_feedback(self.feedback)
            elif self.targetType == TrackObjectGoal.startGate:
                self.feedback = self.gatefinder.process(self.leftImage, self.rightImage, self.disparityImage, self.leftModel, self.stereoModel)
                self.server.publish_feedback(self.feedback)
            # TODO: fix color thresholds
            elif self.targetType == TrackObjectGoal.redBouy:
                # process red buoy
                self.feedback = self.bouyFinder.process(self.leftImage, self.disparityImage, self.leftModel, self.stereoModel, self.thresholds['red'])
                self.server.publish_feedback(self.feedback)
            elif self.targetType == TrackObjectGoal.yellowBouy:
                # process yellow buoy
                self.feedback = self.bouyFinder.process(self.leftImage, self.disparityImage, self.leftModel, self.stereoModel, self.thresholds['yellow'])
                self.server.publish_feedback(self.feedback) 
            elif self.targetType == TrackObjectGoal.greenBouy:
                # process green buoy
                self.feedback = self.bouyFinder.process(self.leftImage, self.disparityImage, self.leftModel, self.stereoModel, self.thresholds['green'])
                self.server.publish_feedback(self.feedback)
        self.response.stoppedOk = self.ok
        self.server.set_succeeded(self.response)
Example #16
    def determineROI(self, side=None):
        """
        Determines the boundaries of a region of interest, based on 
        the pixel coordinates of the gripper as given by the inverse kinematics of the robot
        """
        ROI_width = self.gripper_ROI_size.width
        ROI_height = self.gripper_ROI_size.height

        pose, timeSinceDetection = self.getEstimatedPose()
        stereoModel = image_geometry.StereoCameraModel()
        stereoModel.fromCameraInfo(self.info['left'], self.info['right'])
        (u_l, v_l), (u_r, v_r) = stereoModel.project3dToPixel(pose.position.list)
        ROI_left = ((int(u_l - ROI_width), int(v_l - ROI_height)),
                    (int(u_l + ROI_width), int(v_l + ROI_height)))
        ROI_right = ((int(u_r - ROI_width), int(v_r - ROI_height)),
                     (int(u_r + ROI_width), int(v_r + ROI_height)))
        ROI = {'left': ROI_left, 'right': ROI_right}
        uv = {'left': (u_l, v_l), 'right': (u_r, v_r)}
        if side is None:
            return ROI, uv
        else:
            return ROI[side], uv[side]
Example #17
    # Find the right-camera image message matching the left image's stamp
    for topic, msg, stamp in bag.read_messages(
            topics=['/basler_stereo/right/image_rect_color/compressed']):
        if msg.header.stamp == img_msg[0].header.stamp:
            img_msg[1] = msg
            break

    # Read all PSM1 pose messages (instrument TCP wrt. base frame)
    psm1_msgs = [
        msg for topic, msg, stamp in bag.read_messages(
            topics=['/dvrk/PSM1/position_cartesian_current'])
    ]

# Find PSM1 pose message corresponding (nearest time stamp) to the camera frames
psm1_msg = find_nearest_by_stamp(psm1_msgs, img_msg[0].header.stamp)[1]

# Set up stereo camera model from the image_geometry distributed with ROS
stereo_model = image_geometry.StereoCameraModel()
stereo_model.fromCameraInfo(*cam_info)

# Get robot base to optical (left camera of stereo pair) transformation (the
# 'optical' frame seen wrt. the PSM1 robot 'base' frame)
t_base_optical = msg2tf(
    tf_buffer.lookup_transform_core('PSM1_base', 'stereo_optical',
                                    rospy.Time()).transform)
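# Inverting the 4x4 homogeneous transform reverses the mapping, giving the
# robot base frame expressed in the optical frame.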
t_optical_base = np.linalg.inv(t_base_optical)

# t_base_tcp = np.array([msg2tf(m.pose) for m in psm1_msgs])
# t_optical_tcp = np.array([t_optical_base.dot(t) for t in t_base_tcp])
t_base_tcp = msg2tf(psm1_msg.pose)
t_optical_tcp = t_optical_base.dot(t_base_tcp)

# de-compress images
# -

Example #18
tf_listener = tf.TransformListener()

tf_listener.getFrameStrings()

import time
time.sleep(5)
ecm.move_joint(HARDCODED_ECM_POS)

# +
pick_and_place_utils = None
from pick_and_place_utils import get_feat_position_and_img, tf_to_pykdl_frame, PSM_J1_TO_BASE_LINK_TF
import image_geometry

stereo_cam = image_geometry.StereoCameraModel()
stereo_cam.fromCameraInfo(left_camera_info, right_camera_info)
balls, left_frame = get_feat_position_and_img(left_image_msg, right_image_msg, stereo_cam)
ball_pos_cam = balls[0]
print(ball_pos_cam)
plt.imshow(left_frame)
# depth error experiments
# (-0.02772727272727273, 0.009545454545454546, 0.14180015986663413) {-0.02987164259, 0.01018744707, 0.1481492519}
# (-0.028636363636363637, 0.01, 0.14180015986663413) {-0.02869459987, 0.009940028191, 0.1379978657} 
# (-0.013333333333333334, 0.010416666666666666, 0.12998347987774794) {-0.01380112767, 0.009659290314, 0.1300171614} 
# (0.04038461538461539, 0.008076923076923077, 0.11998475065638273) {-0.009348809719, 0.01096570492, 0.1307697296} 
# -

ball_pos_cam = PyKDL.Vector(*ball_pos_cam)
tf_cam_to_jp21 = tf_to_pykdl_frame(tf_listener.lookupTransform('ecm_pitch_link_1', 'camera', rospy.Time()))
ball_pos_jp21 = tf_cam_to_jp21 * ball_pos_cam
Example #19
    def threshRed(self):
        THRESH_RED_WIDTH_ROI = 200
        THRESH_RED_HEIGHT_ROI = 90

        if 'left' not in self.img or 'right' not in self.img:
            return

        leftImg = cv.CloneMat(self.img['left'])
        rightImg = cv.CloneMat(self.img['right'])
        width = cv.GetSize(leftImg)[0]
        height = cv.GetSize(leftImg)[1]

        estimated_gripper_pose, time_since_detection = self.getEstimatedPose()

        gripper_pos = tfx.pose(estimated_gripper_pose).position
        tf_tool_to_cam = tfx.lookupTransform('/left_BC',
                                             gripper_pos.frame,
                                             wait=10)
        gripper_pos = tf_tool_to_cam * gripper_pos
        stereoModel = image_geometry.StereoCameraModel()
        stereoModel.fromCameraInfo(self.info['left'], self.info['right'])
        (u_l, v_l), (u_r, v_r) = stereoModel.project3dToPixel(gripper_pos.list)

        def expandROIRegion(start_ROI,
                            elapsed_time,
                            max_elapsed_time=20.,
                            growth_factor=2.):
            elapsed_time = min(elapsed_time, max_elapsed_time)

            return int(start_ROI + start_ROI *
                       (elapsed_time / max_elapsed_time) * growth_factor)

        #THRESH_RED_WIDTH_ROI = expandROIRegion(THRESH_RED_WIDTH_ROI, time_since_detection)
        #THRESH_RED_HEIGHT_ROI = expandROIRegion(THRESH_RED_HEIGHT_ROI, time_since_detection)

        leftImg_lb_width = max(int(u_l - THRESH_RED_WIDTH_ROI), 0)
        leftImg_ub_width = min(int(u_l + THRESH_RED_WIDTH_ROI), width - 1)
        leftImg_lb_height = max(int(v_l - THRESH_RED_HEIGHT_ROI), 0)
        leftImg_ub_height = min(int(v_l + THRESH_RED_HEIGHT_ROI), height - 1)
        rightImg_lb_width = max(int(u_r - THRESH_RED_WIDTH_ROI), 0)
        rightImg_ub_width = min(int(u_r + THRESH_RED_WIDTH_ROI), width - 1)
        rightImg_lb_height = max(int(v_r - THRESH_RED_HEIGHT_ROI), 0)
        rightImg_ub_height = min(int(v_r + THRESH_RED_HEIGHT_ROI), height - 1)

        if self.print_messages:
            print('(height, width) = ({0}, {1})'.format(height, width))
            print('time_since_detection: {0}'.format(time_since_detection))
            print('leftImg[{0}:{1},{2}:{3}]'.format(leftImg_lb_height,
                                                    leftImg_ub_height,
                                                    leftImg_lb_width,
                                                    leftImg_ub_width))
            print('rightImg[{0}:{1},{2}:{3}]'.format(rightImg_lb_height,
                                                     rightImg_ub_height,
                                                     rightImg_lb_width,
                                                     rightImg_ub_width))

        if (leftImg_lb_width >= leftImg_ub_width or leftImg_lb_height >= leftImg_ub_height or
                rightImg_lb_width >= rightImg_ub_width or rightImg_lb_height >= rightImg_ub_height):
            return

        leftImg = leftImg[leftImg_lb_height:leftImg_ub_height,
                          leftImg_lb_width:leftImg_ub_width]
        rightImg = rightImg[rightImg_lb_height:rightImg_ub_height,
                            rightImg_lb_width:rightImg_ub_width]
        if self.show_thresh_red_images:
            self.red_roi_pub.publish(self.bridge.cv_to_imgmsg(leftImg))

        leftThresh = cv.CreateImage(cv.GetSize(leftImg), 8, 1)
        rightThresh = cv.CreateImage(cv.GetSize(rightImg), 8, 1)
        leftThresh = threshold(leftImg, leftImg, leftThresh, RED_LOWER,
                               RED_UPPER)
        rightThresh = threshold(rightImg, rightImg, rightThresh, RED_LOWER,
                                RED_UPPER)

        if self.show_thresh_red_images:
            self.red_thresh_roi_pub.publish(
                self.bridge.cv_to_imgmsg(leftThresh))

        leftContours = find_contours(leftThresh, "leftThresh")
        rightContours = find_contours(rightThresh, "rightThresh")
        if len(leftContours) > 0 or len(rightContours) > 0:
            self.lastFoundRed = rospy.Time.now()
            foundRed = True
        else:
            foundRed = (self.lastFoundRed is not None and
                        rospy.Time.now() - self.lastFoundRed < self.redHistoryDuration)

        toolPose = tfx.pose([0, 0, 0],
                            frame=self.tool_frame,
                            stamp=rospy.Time.now()).msg.PoseStamped()
        if foundRed:
            self.red_thresh_marker_pub.publish(
                createMarker(toolPose, color="red"))
        else:
            self.red_thresh_marker_pub.publish(
                createMarker(toolPose, color="blue"))

        self.red_thresh_pub.publish(Bool(foundRed))
Example #20
import os
import pickle
import sys
import tfx
import time

import image_geometry
import numpy as np

np.set_printoptions(suppress=True)

# ------------
# dVRK-related
# ------------

ESC_KEYS     = [27, 1048603]
camerainfo_p = '/home/davinci0/danielseita/suturing/config/camera_info_matrices/'
C_LEFT_INFO  = pickle.load(open(camerainfo_p + 'left.p',  'rb'))
C_RIGHT_INFO = pickle.load(open(camerainfo_p + 'right.p', 'rb'))
STEREO_MODEL = image_geometry.StereoCameraModel()
STEREO_MODEL.fromCameraInfo(C_LEFT_INFO, C_RIGHT_INFO)


def camera_pixels_to_camera_coords(left_pt, right_pt, nparrays=False):
    """ Given [lx,ly] and [rx,ry], determine [cx,cy,cz]. Everything should be LISTS. """
    assert len(left_pt) == len(right_pt) == 2
    disparity = np.linalg.norm( np.array(left_pt) - np.array(right_pt) )
    (xx,yy,zz) = STEREO_MODEL.projectPixelTo3d( (left_pt[0],left_pt[1]), disparity )
    if nparrays:
        return np.array([xx,yy,zz])
    else:
        return [xx,yy,zz] 
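
# Hedged usage sketch (pixel values invented; requires the pickled CameraInfo
# files above): matched left/right pixels of one feature give a camera-frame
# 3-D point, e.g.
#   cx, cy, cz = camera_pixels_to_camera_coords([640.0, 480.0], [600.0, 480.0])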


def init(sleep_time=0):