Example #1
def main(psmName):
    rospy.init_node('dvrk_registration', anonymous=True)
    robot = psm(psmName)

    frameRate = 15
    slop = 1.0 / frameRate
    cams = StereoCameras("left/image_rect",
                         "right/image_rect",
                         "left/camera_info",
                         "right/camera_info",
                         slop=slop)

    tfSync = CameraSync(
        '/stereo/left/camera_info',
        topics=['/dvrk/' + psmName + '/position_cartesian_current'],
        frames=[
            psmName + '_psm_base_link', psmName + '_tool_wrist_link',
            psmName + '_tool_wrist_caudier_link_shaft'
        ])

    camModel = StereoCameraModel()
    topicLeft = rospy.resolve_name("left/camera_info")
    msgL = rospy.wait_for_message(topicLeft, CameraInfo, 10)
    topicRight = rospy.resolve_name("right/camera_info")
    msgR = rospy.wait_for_message(topicRight, CameraInfo, 10)
    camModel.fromCameraInfo(msgL, msgR)

    # Set up GUI
    filePath = rospy.get_param('~registration_yaml')
    print(filePath)
    with open(filePath, 'r') as f:
        data = yaml.safe_load(f)
    if any(k not in data
           for k in ['H', 'minS', 'minV', 'maxV', 'transform', 'points']):
        rospy.logfatal('dVRK Registration: ' + filePath +
                       ' empty or malformed.')
        quit()

    cv2.namedWindow(_WINDOW_NAME)

    transformOld = np.array(data['transform'])

    toolOffset = data[
        'toolOffset']  # distance from pinching axle to center of orange nub
    points = np.array(
        data['points'])  # Set of points in robots frame to register against

    # Wait for registration to start
    displayRegistration(cams, camModel, toolOffset, transformOld, tfSync)

    print('Done')
    cv2.destroyAllWindows()
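
For reference, the registration YAML loaded above must provide at least the keys that the validation check enforces. A minimal sketch of such a file, written from Python; the numeric values are illustrative placeholders borrowed from the defaults in Example #6, not real calibration data:

# Minimal sketch of a registration_params YAML accepted by main() above.
# Values are illustrative (taken from the defaults in Example #6 below).
import numpy as np
import yaml

params = {
    'H': 23,                          # hue of the orange tool tip
    'minS': 173,                      # minimum saturation for the mask
    'minV': 68,                       # minimum value for the mask
    'maxV': 255,                      # maximum value for the mask
    'toolOffset': 0.012,              # pinching axle to orange nub (m)
    'transform': np.eye(4).tolist(),  # camera-to-robot transform
    'points': [[0.10, 0.00, -0.15],   # robot-frame registration targets
               [0.05, 0.10, -0.18],
               [-0.04, 0.13, -0.15]],
}
with open('registration_params.yaml', 'w') as f:
    yaml.safe_dump(params, f)
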
Example #3
    def test_stereo(self):
        lmsg = sensor_msgs.msg.CameraInfo()
        rmsg = sensor_msgs.msg.CameraInfo()
        for m in (lmsg, rmsg):
            m.width = 640
            m.height = 480

        # These parameters taken from a real camera calibration
        lmsg.D =  [-0.363528858080088, 0.16117037733986861, -8.1109585007538829e-05, -0.00044776712298447841, 0.0]
        lmsg.K =  [430.15433020105519, 0.0, 311.71339830549732, 0.0, 430.60920415473657, 221.06824942698509, 0.0, 0.0, 1.0]
        lmsg.R =  [0.99806560714807102, 0.0068562422224214027, 0.061790256276695904, -0.0067522959054715113, 0.99997541519165112, -0.0018909025066874664, -0.061801701660692349, 0.0014700186639396652, 0.99808736527268516]
        lmsg.P =  [295.53402059708782, 0.0, 285.55760765075684, 0.0, 0.0, 295.53402059708782, 223.29617881774902, 0.0, 0.0, 0.0, 1.0, 0.0]

        rmsg.D =  [-0.3560641041112021, 0.15647260261553159, -0.00016442960757099968, -0.00093175810713916221]
        rmsg.K =  [428.38163131344191, 0.0, 327.95553847249192, 0.0, 428.85728580588329, 217.54828640915309, 0.0, 0.0, 1.0]
        rmsg.R =  [0.9982082576219119, 0.0067433328293516528, 0.059454199832973849, -0.0068433268864187356, 0.99997549128605434, 0.0014784127772287513, -0.059442773257581252, -0.0018826283666309878, 0.99822993965212292]
        rmsg.P =  [295.53402059708782, 0.0, 285.55760765075684, -26.507895206214123, 0.0, 295.53402059708782, 223.29617881774902, 0.0, 0.0, 0.0, 1.0, 0.0]

        cam = StereoCameraModel()
        cam.fromCameraInfo(lmsg, rmsg)

        for x in (16, 320, m.width - 16):
            for y in (16, 240, m.height - 16):
                for d in range(1, 10):
                    pt3d = cam.projectPixelTo3d((x, y), d)
                    ((lx, ly), (rx, ry)) = cam.project3dToPixel(pt3d)
                    self.assertAlmostEqual(y, ly, 3)
                    self.assertAlmostEqual(y, ry, 3)
                    self.assertAlmostEqual(x, lx, 3)
                    self.assertAlmostEqual(x, rx + d, 3)
        
        u = 100.0
        v = 200.0
        du = 17.0
        dv = 23.0
        Z = 2.0
        xyz0 = cam.left.projectPixelTo3dRay((u, v))
        xyz0 = (xyz0[0] * (Z / xyz0[2]), xyz0[1] * (Z / xyz0[2]), Z)
        xyz1 = cam.left.projectPixelTo3dRay((u + du, v + dv))
        xyz1 = (xyz1[0] * (Z / xyz1[2]), xyz1[1] * (Z / xyz1[2]), Z)
        self.assertAlmostEqual(cam.left.getDeltaU(xyz1[0] - xyz0[0], Z), du, 3)
        self.assertAlmostEqual(cam.left.getDeltaV(xyz1[1] - xyz0[1], Z), dv, 3)
        self.assertAlmostEqual(cam.left.getDeltaX(du, Z), xyz1[0] - xyz0[0], 3)
        self.assertAlmostEqual(cam.left.getDeltaY(dv, Z), xyz1[1] - xyz0[1], 3)
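
The assertion x == rx + d in the loop above is the rectified-stereo disparity relation. Both projection matrices share fx = 295.534, and the right camera's Tx entry encodes -fx * baseline, so the geometry can be checked by hand in a small sketch (values copied from the messages above):

# Sanity check of the stereo geometry behind the round-trip test above.
fx = 295.53402059708782        # P[0] of both rectified cameras
Tx = -26.507895206214123       # P[3] of the right camera, equal to -fx * B
baseline = -Tx / fx            # ~0.0897 m between the rectified cameras
Z = 2.0                        # an example depth (m)
d = fx * baseline / Z          # disparity at that depth, ~13.25 px
print(baseline, d)
# projectPixelTo3d((x, y), d) followed by project3dToPixel lands the right
# image x coordinate at lx - d, which is exactly what the test's
# assertAlmostEqual(x, rx + d, 3) verifies for each integer disparity.
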
Example #4
class Curiosity():
    def __init__(self):
        saliency_map_sub = rospy.Subscriber("/saliency_map",
                                            Float32MultiArray,
                                            self.saliency_map_callback,
                                            queue_size=1,
                                            buff_size=2**24)
        camera_info_left_sub = rospy.Subscriber("/camera_left/camera_info",
                                                CameraInfo,
                                                self.camera_info_left_callback,
                                                queue_size=1,
                                                buff_size=2**24)
        camera_info_right_sub = rospy.Subscriber(
            "/camera_right/camera_info",
            CameraInfo,
            self.camera_info_right_callback,
            queue_size=1,
            buff_size=2**24)
        point_sub = rospy.Subscriber("/saccade_point",
                                     PointStamped,
                                     self.saccade_point_callback,
                                     queue_size=1,
                                     buff_size=2**24)
        disparity_sub = rospy.Subscriber("/camera/disparity",
                                         DisparityImage,
                                         self.disparity_callback,
                                         queue_size=1,
                                         buff_size=2**24)

        self.saliency_map_curiosity_pub = rospy.Publisher(
            "/saliency_map_curiosity", Float32MultiArray, queue_size=1)
        self.saliency_map_curiosity_image_pub = rospy.Publisher(
            "/saliency_map_curiosity_image", Image, queue_size=1)

        self.transform_proxy = rospy.ServiceProxy("/transform", Transform)

        self.camera_info_left = None
        self.camera_info_right = None
        self.disparity_image = None
        self.camera_model = StereoCameraModel()

        self.targets = []

        self.cv_bridge = CvBridge()

        self.saliency_width = float(rospy.get_param('~saliency_width', '256'))
        self.saliency_height = float(rospy.get_param('~saliency_height',
                                                     '192'))
        self.min_disparity = rospy.get_param(
            "/camera/stereo_image_proc/min_disparity", -16)

        self.tfBuffer = tf2_ros.Buffer(rospy.Duration(30))
        self.listener = tf2_ros.TransformListener(self.tfBuffer)

    def saliency_map_callback(self, saliency_map):
        if self.camera_info_left is not None and self.camera_info_right is not None and self.disparity_image is not None:
            # handle input
            lo = saliency_map.layout
            saliency_map = np.asarray(
                saliency_map.data[lo.data_offset:]).reshape(
                    lo.dim[0].size, lo.dim[1].size)

            # modify saliency map
            self.camera_model.fromCameraInfo(self.camera_info_left,
                                             self.camera_info_right)
            disparity_image = self.cv_bridge.imgmsg_to_cv2(
                self.disparity_image.image)
            for point in self.targets:
                point_new = geometry_msgs.msg.PointStamped()
                point_new.header = point.header
                point_new.point = point.point
                transformed = self.transform_proxy(point_new).res
                transformed_new = tf2_geometry_msgs.PointStamped()
                transformed_new.header = transformed.header
                transformed_new.point = transformed.point
                transformed = transformed_new
                point_torso = (-transformed.point.y, -transformed.point.z,
                               transformed.point.x)
                pixel = self.camera_model.project3dToPixel(point_torso)
                x = int(
                    pixel[0][0] *
                    (self.saliency_width / float(self.camera_info_left.width)))
                y = int(pixel[0][1] * (self.saliency_height /
                                       float(self.camera_info_left.height)))
                disparity = self.camera_model.getDisparity(point_torso[2])
                x = x + disparity
                if x >= 0 and x < self.saliency_width and y >= 0 and y < self.saliency_height:
                    rr, cc = circle(y, x, 25,
                                    (len(saliency_map), len(saliency_map[0])))
                    saliency_map[rr, cc] = 0.

            self.saliency_map_curiosity_pub.publish(
                Float32MultiArray(layout=lo, data=saliency_map.flatten()))

            saliency_map_image = (saliency_map - saliency_map.min()) / (
                saliency_map.max() - saliency_map.min()) * 255.
            saliency_map_image = np.uint8(saliency_map_image)

            try:
                saliency_map_curiosity_image = self.cv_bridge.cv2_to_imgmsg(
                    saliency_map_image, "mono8")
            except CvBridgeError as e:
                print(e)
                return

            self.saliency_map_curiosity_image_pub.publish(
                saliency_map_curiosity_image)

        else:
            rospy.loginfo(
                "Received saliency map but camera information is missing")

    def camera_info_left_callback(self, camera_info):
        self.camera_info_left = camera_info

    def camera_info_right_callback(self, camera_info):
        self.camera_info_right = camera_info

    def disparity_callback(self, disparity_image):
        self.disparity_image = disparity_image

    def saccade_point_callback(self, point):
        self.targets.append(point)
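
The callback above rebuilds the 2-D saliency map from the flat Float32MultiArray via its layout. A minimal sketch of a publisher-side message that satisfies that reshape, assuming the publisher fills the dims row-major:

# Sketch: building a Float32MultiArray whose layout matches the reshape in
# saliency_map_callback (dim[0] = rows, dim[1] = cols, row-major data).
import numpy as np
from std_msgs.msg import Float32MultiArray, MultiArrayDimension

saliency = np.random.rand(192, 256).astype(np.float32)
msg = Float32MultiArray()
msg.layout.data_offset = 0
msg.layout.dim = [
    MultiArrayDimension(label='rows', size=saliency.shape[0],
                        stride=saliency.size),
    MultiArrayDimension(label='cols', size=saliency.shape[1],
                        stride=saliency.shape[1]),
]
msg.data = saliency.flatten().tolist()

# Receiver side, exactly as in the callback above:
lo = msg.layout
recovered = np.asarray(msg.data[lo.data_offset:]).reshape(
    lo.dim[0].size, lo.dim[1].size)
assert recovered.shape == saliency.shape
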
Example #5
class tool_tracker:
    def __init__(self):
        self.bridge = CvBridge()
        # Create camera model for calculating 3d position
        self.cam_model = StereoCameraModel()
        msgL = rospy.wait_for_message("/stereo/left/camera_info", CameraInfo,
                                      1)
        msgR = rospy.wait_for_message("/stereo/right/camera_info", CameraInfo,
                                      1)
        self.cam_model.fromCameraInfo(msgL, msgR)
        # Set up subscribers for camera images
        self.image_r_sub = rospy.Subscriber("/stereo/right/image_rect_color",
                                            Image, self.image_r_callback)
        self.imageL_sub = rospy.Subscriber("/stereo/left/image_rect_color",
                                           Image, self.imageL_callback)
        # Set up blank image for left camera to update
        self.imageL = np.zeros((1, 1, 3), np.uint8)
        # Set up GUI
        cv2.namedWindow('image')
        cv2.createTrackbar('H', 'image', 0, 180, self.nothing_cb)
        cv2.createTrackbar('min S', 'image', 80, 255, self.nothing_cb)
        cv2.createTrackbar('min V', 'image', 80, 255, self.nothing_cb)
        cv2.createTrackbar('max V', 'image', 255, 255, self.nothing_cb)
        cv2.createTrackbar('masked', 'image', 0, 1, self.nothing_cb)

    def nothing_cb(self, data):
        pass

    def mask(self, img):
        # Convert to HSV and mask colors
        h = cv2.getTrackbarPos('H', 'image')
        sMin = cv2.getTrackbarPos('min S', 'image')
        vMin = cv2.getTrackbarPos('min V', 'image')
        vMax = cv2.getTrackbarPos('max V', 'image')
        hueShift = 0
        if h < 20 or h > 160:
            hueShift = 60
        colorLower = ((h - 5 + hueShift) % 180, sMin, vMin)
        colorUpper = ((h + 5 + hueShift) % 180, 255, vMax)
        blurred = cv2.GaussianBlur(img, (31, 31), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        hsv[:, :, 0] = (hsv[:, :, 0] + hueShift) % 180
        mask = cv2.inRange(hsv, colorLower, colorUpper)
        # Refine mask
        mask = cv2.erode(mask, None, iterations=1)
        mask = cv2.dilate(mask, None, iterations=2)
        return mask

    def makeHSV(self, img):
        blurred = cv2.GaussianBlur(img, (21, 21), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        hsv[:, :, 2] = 255
        return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    def get_centroid(self, maskImage):
        # With help from http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(maskImage.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (M["m10"] / M["m00"], M["m01"] / M["m00"])
            # only proceed if the radius meets a minimum size
            if radius > 2:
                return center
        # Otherwise return nonsense
        return None

    def imageL_callback(self, data):
        try:
            self.imageL = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

    def image_r_callback(self, data):
        try:
            imageR = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        # Process left image if it exists
        (rows, cols, channels) = self.imageL.shape

        if cols > 60 and rows > 60:
            maskImageL = self.mask(self.imageL)
            centerL = self.get_centroid(maskImageL)

        # if it doesn't exist, don't do anything
        else:
            return

        (rows, cols, channels) = imageR.shape
        if cols > 60 and rows > 60:
            maskImageR = self.mask(imageR)
            centerR = self.get_centroid(maskImageR)
        else:
            return

        if centerL is not None and centerR is not None:
            pixel_shift = 1
            point3D = self.cam_model.projectPixelTo3d(centerL,
                                                      centerL[0] - centerR[0])
            #print(centerL)
            print(point3D)

            cv2.circle(self.imageL, (int(centerL[0]), int(centerL[1])), 3,
                       (0, 255, 0), -1, cv2.LINE_AA)

            cv2.circle(imageR, (int(centerR[0]), int(centerR[1])), 3,
                       (0, 255, 0), -1, cv2.LINE_AA)

        if cv2.getTrackbarPos('masked', 'image') == 0:
            # Draw centroids
            doubleImage = np.zeros((rows, cols * 2, channels), np.uint8)
            doubleImage[0:rows, 0:cols] = self.imageL
            doubleImage[0:rows, cols:cols * 2] = imageR
        else:
            doubleImage = np.zeros((rows, cols * 2), np.uint8)
            doubleImage[0:rows, 0:cols] = maskImageL
            doubleImage[0:rows, cols:cols * 2] = maskImageR
        cv2.imshow("image", doubleImage)
        cv2.waitKey(3)
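
The mask method above deals with OpenCV's circular hue axis (0-179): a red target near the wraparound would otherwise need two separate in-range checks, so every hue is rotated by 60 first. A standalone sketch of that trick:

# Sketch of the hue-wraparound handling used in tool_tracker.mask.
def hue_bounds(h, margin=5):
    # Rotate hues by 60 (mod 180) when the target sits near the red
    # wraparound, so the [h - margin, h + margin] band stays contiguous.
    shift = 60 if (h < 20 or h > 160) else 0
    lower = (h - margin + shift) % 180
    upper = (h + margin + shift) % 180
    return shift, lower, upper

print(hue_bounds(175))   # (60, 50, 60): the band 170..0 becomes 50..60
print(hue_bounds(90))    # (0, 85, 95): greens need no shift
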
Example #6
def main(psmName):
    rospy.init_node('dvrk_registration', anonymous=True)
    toolOffset = .012 # distance from pinching axle to center of orange nub

    robot = psm(psmName)
    rate = rospy.Rate(15)  # 15 Hz
    scriptDirectory = os.path.dirname(os.path.abspath(__file__))
    filePath = os.path.join(scriptDirectory,'..','..','defaults','registration_params.yaml')
    print(filePath)
    with open(filePath, 'r') as f:
        data = yaml.safe_load(f)
    if 'H' not in data:
        rospy.logwarn('dVRK Registration: defaults/registration_params.yaml empty or malformed. Using defaults for orange tip')
        data = {'H': 23, 'minS': 173, 'minV': 68, 'maxV': 255, 'transform':np.eye(4).tolist()}
    
    frameRate = 15
    slop = 1.0 / frameRate
    cams = StereoCameras( "left/image_rect",
                          "right/image_rect",
                          "left/camera_info",
                          "right/camera_info",
                          slop = slop)

    camModel = StereoCameraModel()
    topicLeft = rospy.resolve_name("left/camera_info")
    msgL = rospy.wait_for_message(topicLeft, CameraInfo, 3)
    topicRight = rospy.resolve_name("right/camera_info")
    msgR = rospy.wait_for_message(topicRight, CameraInfo, 3)
    camModel.fromCameraInfo(msgL, msgR)

    # Set up GUI
    cv2.namedWindow(_WINDOW_NAME)
    cv2.createTrackbar('H', _WINDOW_NAME, data['H'], 180, nothingCB)
    cv2.createTrackbar('min S', _WINDOW_NAME, data['minS'], 255, nothingCB)
    cv2.createTrackbar('min V', _WINDOW_NAME, data['minV'], 255, nothingCB)
    cv2.createTrackbar('max V', _WINDOW_NAME, data['maxV'], 255, nothingCB)
    cv2.createTrackbar('masked', _WINDOW_NAME, 0, 1, nothingCB)

    transformOld = np.array(data['transform'])

    # Wait for registration to begin
    while not rospy.is_shutdown():
        # Get last images
        imageL = cams.camL.image
        imageR = cams.camR.image

        # Wait for images to exist
        if imageR is None or imageL is None:
            continue

        # Check we have valid images
        (rows,cols,channels) = imageL.shape
        if cols < 60 or rows < 60 or imageL.shape != imageR.shape:
            continue


        # Find 3D position of end effector
        zVector = robot.get_current_position().M.UnitZ()
        pVector = robot.get_current_position().p
        offset = np.array([zVector.x(), zVector.y(), zVector.z(), 0])
        offset = offset * toolOffset
        pos = np.matrix([pVector.x(), pVector.y(), pVector.z(), 1]) + offset
        pos = pos.transpose()
        pos = np.linalg.inv(transformOld) * pos

        # Project position into 2d coordinates
        posL = camModel.left.project3dToPixel(pos)
        posL = [int(l) for l in posL]
        posR = camModel.right.project3dToPixel(pos)
        posR = [int(l) for l in posR]

        (rows,cols) = imageL.shape[0:2]
        posR = (posR[0] + cols, posR[1])

        point3d, image = calculate3DPoint(imageL, imageR, camModel)

        # Draw images and display them
        cv2.circle(image, tuple(posL), 2,(255, 255, 0), -1)
        cv2.circle(image, tuple(posR), 2,(255, 255, 0), -1)
        cv2.circle(image, tuple(posL), 7,(255, 255, 0), 2)
        cv2.circle(image, tuple(posR), 7,(255, 255, 0), 2)
        
        message = "Press s to start registration. Robot will move to its joint limits."
        cv2.putText(image, message, (50,50), cv2.FONT_HERSHEY_DUPLEX, 1, [0, 0, 255])
        message = "MAKE SURE AREA IS CLEAR"
        cv2.putText(image, message, (50,100), cv2.FONT_HERSHEY_DUPLEX, 1, [0, 0, 255])
        cv2.imshow(_WINDOW_NAME, image)
        key = cv2.waitKey(1)
        if key == 27:
            cv2.destroyAllWindows() 
            quit()  # esc to quit
        elif chr(key%256) == 's' or chr(key%256) == 'S':
            break # s to continue
        rate.sleep()
    np.random.seed(1)
    
    # Main registration
    points = np.array([[ 0.10, 0.00,-0.15],
                       [ 0.05, 0.10,-0.18], 
                       [-0.04, 0.13,-0.15], 
                       [ 0.05, 0.05,-0.18], 
                       [-0.05,-0.02,-0.15]])
    print(points)
    pointsCam = np.empty(points.shape)
    for i, point in enumerate(points):
        if rospy.is_shutdown():
            quit()
        if not robot.move(PyKDL.Vector(point[0], point[1], point[2])):
            rospy.logfatal("dVRK Registration: Unable to move robot")
            quit()
        rospy.sleep(.1)
        pBuffer = deque([], 50)
        rBuffer = deque([], 50)
        zVector = robot.get_current_position().M.UnitZ()
        offset = np.array([zVector.x(), zVector.y(), zVector.z()])
        offset = offset * toolOffset
        startTime = rospy.get_time()

        while rospy.get_time() - startTime < 1:
            # Get last images
            imageL = cams.camL.image
            imageR = cams.camR.image

            point3d, image = calculate3DPoint(imageL, imageR, camModel)
            if image is not None:
                cv2.imshow(_WINDOW_NAME, image)
                key = cv2.waitKey(1)
                if key == 27: 
                    cv2.destroyAllWindows()
                    quit()
            rate.sleep()
            if point3d is not None:
                pBuffer.append(point3d)
                pVector = robot.get_current_position().p
                rBuffer.append(np.array([pVector.x(), pVector.y(), pVector.z()]) + offset)
                
        pointsCam[i,:] = np.median(pBuffer,0)
        points[i,:] = np.median(rBuffer,0)
        print("Using median of %d values: (%f, %f, %f)" % (len(pBuffer),
                                                          pointsCam[i,0],
                                                          pointsCam[i,1],
                                                          pointsCam[i,2]))
    
    (rot, pos) = rigidTransform3D(np.mat(pointsCam), np.mat(points))
    calculateRMSE(np.mat(pointsCam), np.mat(points), rot, pos)

    # retval, out, inliers = cv2.estimateAffine3D(pointsCam, points)
    out = np.hstack((rot,pos))
    transform = np.matrix(np.vstack((out,[0, 0, 0, 1])))

    # Save all parameters to YAML file
    data['transform'] = transform.tolist()
    data['H'] = cv2.getTrackbarPos('H',_WINDOW_NAME)
    data['minS'] = cv2.getTrackbarPos('min S',_WINDOW_NAME)
    data['minV'] = cv2.getTrackbarPos('min V',_WINDOW_NAME)
    data['maxV'] = cv2.getTrackbarPos('max V',_WINDOW_NAME)
    with open(filePath, 'w') as f:
        yaml.dump(data,f)

    # Evaluate registration
    while not rospy.is_shutdown():
        # Get last images
        imageL = cams.camL.image
        imageR = cams.camR.image

        # Wait for images to exist
        if imageR is None or imageL is None:
            continue


        # Check we have valid images
        (rows,cols,channels) = imageL.shape
        if cols < 60 or rows < 60 or imageL.shape != imageR.shape:
            continue

        # Find 3D position of end effector
        zVector = robot.get_current_position().M.UnitZ()
        pVector = robot.get_current_position().p
        offset = np.array([zVector.x(), zVector.y(), zVector.z(), 0])
        offset = offset * toolOffset
        pos = np.matrix([pVector.x(), pVector.y(), pVector.z(), 1]) + offset
        pos = pos.transpose()
        pos = np.linalg.inv(transform) * pos

        # Project position into 2d coordinates
        posL = camModel.left.project3dToPixel(pos)
        posL = [int(l) for l in posL]
        posR = camModel.right.project3dToPixel(pos)
        posR = [int(l) for l in posR]

        # Draw images and display them
        cv2.circle(imageL, tuple(posL), 2,(255, 255, 0), -1)
        cv2.circle(imageR, tuple(posR), 2,(255, 255, 0), -1)
        cv2.circle(imageL, tuple(posL), 7,(255, 255, 0), 2)
        cv2.circle(imageR, tuple(posR), 7,(255, 255, 0), 2)
        image = combineImages(imageL, imageR)
        cv2.imshow(_WINDOW_NAME, image)
        key = cv2.waitKey(1)
        if key == 27: 
            cv2.destroyAllWindows()
            quit()
        rate.sleep()

    print('Done')
    cv2.destroyAllWindows()
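
rigidTransform3D itself is not shown in this example. A minimal sketch of the usual SVD-based (Kabsch) solution such a helper typically implements, mapping the camera-frame medians onto the robot-frame points:

# Sketch, assuming rigidTransform3D computes an SVD-based (Kabsch) rigid fit.
import numpy as np

def rigid_transform_3d(A, B):
    # Find R (3x3) and t (3x1) minimizing ||R A_i + t - B_i|| over Nx3 sets.
    A, B = np.asarray(A, dtype=float), np.asarray(B, dtype=float)
    cA, cB = A.mean(axis=0), B.mean(axis=0)
    H = (A - cA).T.dot(B - cB)          # 3x3 cross-covariance
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T.dot(U.T)
    if np.linalg.det(R) < 0:            # reject reflections
        Vt[2, :] *= -1
        R = Vt.T.dot(U.T)
    t = cB.reshape(3, 1) - R.dot(cA.reshape(3, 1))
    return R, t

Stacking the result as np.hstack((rot, pos)) and appending [0, 0, 0, 1], as the code above does, yields the 4x4 homogeneous transform that gets written back to the YAML file.
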
Example #7
class Modifier():
    def __init__(self):
        self.saliency_map_modified_pub = rospy.Publisher(
            "/saliency_map_modified", Float32MultiArray, queue_size=1)
        self.saliency_map_modified_image_pub = rospy.Publisher(
            "/saliency_map_modified_image", Image, queue_size=1)

        self.camera_image = None
        self.camera_info_left = None
        self.camera_info_right = None
        self.disparity_image = None
        self.camera_model = StereoCameraModel()
        # self.camera_model = PinholeCameraModel()
        self.link_states = None

        self.tilt_eye = 0.
        self.pan_eye = 0.
        self.tilt_head = 0.
        self.pan_head = 0.

        self.tilt_eye_limit = 0.925025
        self.pan_eye_limit = 0.925025
        self.tilt_head_limit = 1.57
        self.pan_head_limit = 1.57

        self.cv_bridge = CvBridge()

        self.tfBuffer = tf2_ros.Buffer(rospy.Duration(30))
        self.listener = tf2_ros.TransformListener(self.tfBuffer)

        self.tilt_head_frame = 'hollie_neck_pitch_link'
        self.pan_head_frame = 'hollie_neck_yaw_link'
        self.tilt_eye_frame = 'swing_mesh'
        self.pan_eye_frame = 'camera_left_link'

        self.move_head = rospy.get_param("~move_head", False)
        self.move_eyes = rospy.get_param("~move_eyes", True)
        self.min_disparity = rospy.get_param(
            "/hollie/camera/stereo_image_proc/min_disparity", -16)

        saliency_map_sub = rospy.Subscriber("/saliency_map",
                                            Float32MultiArray,
                                            self.saliency_map_callback,
                                            queue_size=1,
                                            buff_size=2**24)
        camera_info_left_sub = rospy.Subscriber(
            "/hollie/camera/left/camera_info",
            CameraInfo,
            self.camera_info_left_callback,
            queue_size=1,
            buff_size=2**24)
        camera_info_right_sub = rospy.Subscriber(
            "/hollie/camera/right/camera_info",
            CameraInfo,
            self.camera_info_right_callback,
            queue_size=1,
            buff_size=2**24)
        disparity_sub = rospy.Subscriber("/hollie/camera/disparity",
                                         DisparityImage,
                                         self.disparity_callback,
                                         queue_size=1,
                                         buff_size=2**24)
        link_state_sub = rospy.Subscriber("/gazebo/link_states",
                                          LinkStates,
                                          self.link_state_callback,
                                          queue_size=1,
                                          buff_size=2**24)
        joint_state_sub = rospy.Subscriber("/joint_states",
                                           JointState,
                                           self.joint_state_callback,
                                           queue_size=1,
                                           buff_size=2**24)

    def saliency_map_callback(self, saliency_map):
        if self.camera_info_left is not None and self.camera_info_right is not None and self.disparity_image is not None:
            # handle input
            lo = saliency_map.layout
            saliency_map = np.asarray(
                saliency_map.data[lo.data_offset:]).reshape(
                    lo.dim[0].size, lo.dim[1].size)

            # modify saliency map
            disparity_image = self.cv_bridge.imgmsg_to_cv2(
                self.disparity_image.image)
            self.camera_model.fromCameraInfo(self.camera_info_left,
                                             self.camera_info_right)
            for i in range(0, len(saliency_map)):
                for j in range(0, len(saliency_map[0])):
                    x = int(j * (float(self.camera_info_left.width) /
                                 len(saliency_map[0])))
                    y = int(i * (float(self.camera_info_left.height) /
                                 len(saliency_map)))

                    disparity = disparity_image[y][x]
                    if disparity < self.min_disparity:
                        disparity = 1.
                    point_eye = self.camera_model.projectPixelTo3d((x, y),
                                                                   disparity)

                    point_eye = (point_eye[2], point_eye[0], -point_eye[1])
                    pan_eye = self.pan_eye + math.atan2(
                        point_eye[1], point_eye[0])
                    tilt_eye = self.tilt_eye + math.atan2(
                        -point_eye[2],
                        math.sqrt(
                            math.pow(point_eye[0], 2) +
                            math.pow(point_eye[1], 2)))

                    if self.move_head:
                        # TODO: tricky since we cannot move the head to find out
                        print "TODO"
                    else:
                        if abs(pan_eye) > self.pan_eye_limit or abs(
                                tilt_eye) > self.tilt_eye_limit:
                            saliency_map[i][j] = 0.

            self.saliency_map_modified_pub.publish(
                Float32MultiArray(layout=lo, data=saliency_map.flatten()))

            try:
                saliency_map_modified_image = self.cv_bridge.cv2_to_imgmsg(
                    np.uint8(saliency_map * 455.), "mono8")
            except CvBridgeError as e:
                print(e)
                return

            self.saliency_map_modified_image_pub.publish(
                saliency_map_modified_image)
            print "done"

        else:
            rospy.loginfo(
                "Received saliency map but camera information is missing")

    def camera_info_left_callback(self, camera_info):
        self.camera_info_left = camera_info

    def camera_info_right_callback(self, camera_info):
        self.camera_info_right = camera_info

    def disparity_callback(self, disparity_image):
        self.disparity_image = disparity_image

    def joint_state_callback(self, joint_state):
        self.pan_eye = joint_state.position[joint_state.name.index(
            "hollie_left_eye_pan_joint")]
        self.tilt_eye = joint_state.position[joint_state.name.index(
            "hollie_eyes_tilt_joint")]

    def link_state_callback(self, link_states):
        self.link_states = link_states
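
Both this node and the Curiosity node turn a camera-frame point into gaze angles with the same pair of atan2 expressions. The conversion in isolation, as a small sketch:

# Sketch of the pan/tilt extraction used in saliency_map_callback above.
import math

def pan_tilt(point_optical):
    # Optical frame: x right, y down, z forward; remap to x-forward form
    # exactly as the callback does, then take azimuth and elevation.
    x, y, z = point_optical
    p = (z, x, -y)
    pan = math.atan2(p[1], p[0])
    tilt = math.atan2(-p[2], math.hypot(p[0], p[1]))
    return pan, tilt

print(pan_tilt((0.0, 0.0, 1.0)))   # straight ahead -> (0.0, 0.0)
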
Example #8
def main():
    rospy.init_node('dvrk_registration', anonymous=True)
    from tf_sync import CameraSync
    psmName = rospy.get_param('~arm')
    robot = psm(psmName)

    frameRate = 15
    slop = 1.0 / frameRate
    cams = StereoCameras("left/image_rect",
                         "right/image_rect",
                         "left/camera_info",
                         "right/camera_info",
                         slop=slop)

    tfSync = CameraSync(
        '/stereo/left/camera_info',
        topics=['/dvrk/' + psmName + '/position_cartesian_current'],
        frames=[
            psmName + '_psm_base_link', psmName + '_tool_wrist_link',
            psmName + '_tool_wrist_sca_shaft_link'
        ])

    camModel = StereoCameraModel()
    topicLeft = rospy.resolve_name("left/camera_info")
    msgL = rospy.wait_for_message(topicLeft, CameraInfo, 10)
    topicRight = rospy.resolve_name("right/camera_info")
    msgR = rospy.wait_for_message(topicRight, CameraInfo, 10)
    camModel.fromCameraInfo(msgL, msgR)

    # Set up GUI
    filePath = rospy.get_param('~registration_yaml')
    print(filePath)
    with open(filePath, 'r') as f:
        data = yaml.safe_load(f)
    if any(k not in data
           for k in ['H', 'minS', 'minV', 'maxV', 'transform', 'points']):
        rospy.logfatal('dVRK Registration: ' + filePath +
                       ' empty or malformed.')
        quit()

    try:
        targetLink = psmName + data['toolOffsetFrame']
    except KeyError as e:
        targetLink = None

    cv2.namedWindow(_WINDOW_NAME)
    cv2.createTrackbar('H', _WINDOW_NAME, data['H'], 240, nothingCB)
    cv2.createTrackbar('min S', _WINDOW_NAME, data['minS'], 255, nothingCB)
    cv2.createTrackbar('min V', _WINDOW_NAME, data['minV'], 255, nothingCB)
    cv2.createTrackbar('max V', _WINDOW_NAME, data['maxV'], 255, nothingCB)
    cv2.createTrackbar('masked', _WINDOW_NAME, 0, 1, nothingCB)

    transformOld = np.array(data['transform'])

    toolOffset = data[
        'toolOffset']  # distance from pinching axle to center of orange nub
    points = np.array(
        data['points'])  # Set of points in robots frame to register against

    # Wait for registration to start
    displayRegistration(cams,
                        camModel,
                        toolOffset,
                        transformOld,
                        tfSync,
                        targetLink=targetLink)

    # Main registration
    (pointsA, pointsB) = getRegistrationPoints(points,
                                               robot,
                                               cams,
                                               camModel,
                                               toolOffset,
                                               tfSync,
                                               targetLink=targetLink)
    transform, transform2 = calculateRegistration(pointsA, pointsB)

    # Save all parameters to YAML file
    data['transform'] = transform.tolist()
    data['affine_transform'] = transform2.tolist()
    data['H'] = cv2.getTrackbarPos('H', _WINDOW_NAME)
    data['minS'] = cv2.getTrackbarPos('min S', _WINDOW_NAME)
    data['minV'] = cv2.getTrackbarPos('min V', _WINDOW_NAME)
    data['maxV'] = cv2.getTrackbarPos('max V', _WINDOW_NAME)
    with open(filePath, 'w') as f:
        yaml.dump(data, f)
        print("Saved to " + filePath)

    # Publish transform as message to camera_transform_pub
    msg = posemath.toMsg(posemath.fromMatrix(transform))
    pub = rospy.Publisher('set_camera_transform',
                          Pose,
                          latch=True,
                          queue_size=10)
    pub.publish(msg)

    # Show Registration
    displayRegistration(cams,
                        camModel,
                        toolOffset,
                        transform,
                        tfSync,
                        targetLink=targetLink)

    print('Done')
    cv2.destroyAllWindows()
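
posemath from tf_conversions is what converts the 4x4 registration matrix into the Pose published above. A short round-trip sketch, assuming those helpers are available:

# Sketch: 4x4 homogeneous matrix <-> geometry_msgs/Pose via tf_conversions.
import numpy as np
from tf_conversions import posemath

transform = np.eye(4)                                  # placeholder result
msg = posemath.toMsg(posemath.fromMatrix(transform))   # matrix -> Pose
back = posemath.toMatrix(posemath.fromMsg(msg))        # Pose -> matrix
assert np.allclose(transform, back)
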
Example #9
class HeadManager():
    def __init__(self):
        self.tilt_eye_pub = rospy.Publisher("/eye_tilt", Float64, queue_size=1)
        self.pan_eye_left_pub = rospy.Publisher("/left_eye_pan", Float64, queue_size=1)
        self.pan_eye_right_pub = rospy.Publisher("/right_eye_pan", Float64, queue_size=1)
        self.tilt_head_pub = rospy.Publisher("/neck_pitch", Float64, queue_size=1)
        self.pan_head_pub = rospy.Publisher("neck_yaw", Float64, queue_size=1)
        self.roi_pub = rospy.Publisher("/roi", Image, queue_size=1)
        self.label_pub = rospy.Publisher("/label", String, queue_size=1)
        self.probe_pub = rospy.Publisher("/probe_results", String, queue_size=1)
        self.point_pub = rospy.Publisher("/saccade_point", PointStamped, queue_size=1)
        self.tilt_pub = rospy.Publisher("/tilt", Float64, queue_size=1)
        self.pan_pub = rospy.Publisher("/pan", Float64, queue_size=1)
        self.shift_pub = rospy.Publisher("/shift", std_msgs.msg.Empty, queue_size=1)
        self.status_pub = rospy.Publisher("/status", String, queue_size=1)

        self.recognizer = rospy.ServiceProxy('recognize', Roi)
        self.memory = rospy.ServiceProxy('new_object', NewObject)
        self.probe_label = rospy.ServiceProxy('probe_label', ProbeLabel)
        self.probe_coordinate = rospy.ServiceProxy('probe_coordinate', ProbeCoordinate)

        self.saccade_ser = rospy.Service('saccade', Target, self.saccade)
        self.look_ser = rospy.Service('look', Look, self.look)
        self.probe_ser = rospy.Service('probe', SetBool, self.probe)
        self.transform_ser = rospy.Service('transform', Transform, self.transform)

        self.camera_image = None
        self.camera_info_left = None
        self.camera_info_right = None
        self.disparity_image = None
        self.camera_model = StereoCameraModel()
        self.link_states = None

        self.tilt_eye = 0.
        self.pan_eye = 0.
        self.tilt_head = 0.
        self.pan_head = 0.

        self.tilt_eye_upper_limit = 0.4869469
        self.tilt_eye_lower_limit = -0.8220501
        self.pan_eye_limit = 0.77754418
        self.tilt_head_limit = math.pi/4
        self.pan_head_limit = math.pi/2

        self.saliency_width = float(rospy.get_param('~saliency_width', '256'))
        self.saliency_height = float(rospy.get_param('~saliency_height', '192'))
        self.move_eyes = rospy.get_param("~move_eyes", True)
        self.move_head = rospy.get_param("~move_head", True)
        self.shift = rospy.get_param("~shift", True)
        self.min_disparity = rospy.get_param("/camera/stereo_image_proc/min_disparity", "-16")
        self.recognize = rospy.get_param("~recognize", True)
        self.probe = rospy.get_param("~probe", False)

        self.cv_bridge = CvBridge()

        self.tfBuffer = tf2_ros.Buffer(rospy.Duration(30))
        self.listener = tf2_ros.TransformListener(self.tfBuffer)
        self.transform = None
        self.static_frame = "hollie_base_x_link"

        camera_sub = rospy.Subscriber("/camera_left/image_raw", Image, self.image_callback, queue_size=1, buff_size=2**24)
        camera_info_left_sub = rospy.Subscriber("/camera_left/camera_info", CameraInfo, self.camera_info_left_callback, queue_size=1, buff_size=2**24)
        camera_info_right_sub = rospy.Subscriber("/camera_right/image
_info", CameraInfo, self.camera_info_right_callback, queue_size=1, buff_size=2**24)
        disparity_sub = rospy.Subscriber("/camera/disparity", DisparityImage, self.disparity_callback, queue_size=1, buff_size=2**24)
        joint_state_sub = rospy.Subscriber("/joint_states", JointState, self.joint_state_callback, queue_size=1, buff_size=2**24)
        link_state_sub = rospy.Subscriber("/gazebo/link_states", LinkStates, self.link_state_callback, queue_size=1, buff_size=2**24)

    def saccade(self, saccade):
        if self.camera_image is None:
            rospy.loginfo("Received saccade but camera_image is missing")
            return False
        elif self.camera_info_left is None:
            rospy.loginfo("Received saccade but camera_info_left is missing")
            return False
        elif self.camera_info_right is None:
            rospy.loginfo("Received saccade but camera_info_right is missing")
            return False
        else:

            if self.transform is None:
                try:
                    self.transform = self.tfBuffer.lookup_transform("camera_left_link_optical", self.static_frame, rospy.Time(0))
                except:
                    rospy.loginfo("Transformation gone wrong, dropping")
                    self.status_pub.publish("dropping")
                    return False

            saccade = saccade.target

            # scale saccade target to camera image size
            x = int(saccade.x * (float(self.camera_image.width)/self.saliency_width))
            y = int(saccade.y * (float(self.camera_image.height)/self.saliency_height))

            # create and publish roi
            size = 25
            x1 = max(0, x - size)
            y1 = max(0, y - size)
            x2 = min(x + size, self.camera_image.width)
            y2 = min(y + size, self.camera_image.height)

            try:
                image = self.cv_bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            except CvBridgeError as e:
                print(e)
                return False
            roi = image[y1:y2, x1:x2]
            try:
                roi = self.cv_bridge.cv2_to_imgmsg(roi, "bgr8")
            except CvBridgeError as e:
                print(e)
                return False
            self.roi_pub.publish(roi)

            # get point in eye frame
            if self.disparity_image is None:
                # disparity of 0 => point is at infinity
                point_eye = self.camera_model.projectPixelTo3d((x, y), 0)
            else:
                disparity_image = self.cv_bridge.imgmsg_to_cv2(self.disparity_image.image)
                disparity = disparity_image[y][x] - (self.min_disparity - 2)
                self.camera_model.fromCameraInfo(self.camera_info_left, self.camera_info_right)
                distance = self.camera_model.getZ(disparity)
                point_eye = self.camera_model.projectPixelTo3d((x, y), disparity)

            point_eye = (point_eye[2], point_eye[0], -point_eye[1])
            print "point_eye: " + str(point_eye)

            pan_eye = self.pan_eye + math.atan2(point_eye[1], point_eye[0])
            tilt_eye = self.tilt_eye + math.atan2(-point_eye[2], math.sqrt(math.pow(point_eye[0], 2) + math.pow(point_eye[1], 2)))
            print "move_eye: %f, %f" % (tilt_eye, pan_eye)

            t = PointStamped()
            t.header.stamp = rospy.Time.now()
            t.header.frame_id = self.camera_model.tfFrame()
            t.point.x = point_eye[0]
            t.point.y = -point_eye[1]
            t.point.z = point_eye[2]

            # publish as static point for curiosity
            try:
                point_static = self.tfBuffer.transform(t, self.static_frame)
            except:
                rospy.loginfo("Transformation gone wrong, dropping")
                self.status_pub.publish("dropping")
                return False
            self.point_pub.publish(point_static)

            # publish as point in original eye frame
            point_wide_angle = self.tfBuffer.registration.get(type(point_static))(point_static, self.transform)

            point_wide_angle = (point_wide_angle.point.x, point_wide_angle.point.y, point_wide_angle.point.z)
            pan = -math.atan2(point_wide_angle[1], point_wide_angle[0])
            tilt = math.atan2(-point_wide_angle[2], math.sqrt(math.pow(point_wide_angle[0], 2) + math.pow(point_wide_angle[1], 2)))
            self.pan_pub.publish(pan)
            self.tilt_pub.publish(tilt)

            # transform to head frame
            try:
                point_head = self.tfBuffer.transform(t, "base_link")
            except:
                rospy.loginfo("Transformation gone wrong, dropping")
                self.status_pub.publish("dropping")
                return False
            point_head = (point_head.point.x, point_head.point.y, point_head.point.z)
            print "point_head: " + str(point_head)

            pan_head = self.pan_head + math.atan2(point_head[1], point_head[0])
            tilt_head = self.tilt_head + math.atan2(-point_head[2], math.sqrt(math.pow(point_head[0], 2) + math.pow(point_head[1], 2)))
            print "move_head: %f, %f" % (tilt_head, pan_head)

            if self.move_eyes:
                if abs(pan_eye) < self.pan_eye_limit and self.tilt_eye_lower_limit < tilt_eye and tilt_eye < self.tilt_eye_upper_limit:
                    rospy.loginfo("Moving eyes: %f, %f" % (pan_eye, tilt_eye))
                    self.pan_eye_left_pub.publish(pan_eye)
                    self.pan_eye_right_pub.publish(pan_eye)
                    self.tilt_eye_pub.publish(tilt_eye)
                elif self.move_head:
                    # trim head values
                    tilt_head_trimmed = tilt_head
                    pan_head_trimmed = pan_head
                    if tilt_head < -self.tilt_head_limit:
                        rospy.loginfo("Tilt_head under limit, trimming")
                        tilt_head_trimmed = -self.tilt_head_limit
                    elif tilt_head > self.tilt_head_limit:
                        rospy.loginfo("Tilt_head over limit, trimming")
                        tilt_head_trimmed = self.tilt_head_limit
                    if pan_head < -self.pan_head_limit:
                        rospy.loginfo("Pan_head under limit, trimming")
                        pan_head_trimmed = -self.pan_head_limit
                    elif pan_head > self.pan_head_limit:
                        rospy.loginfo("Pan_head over limit, trimming")
                        pan_head_trimmed = self.pan_head_limit

                    # move head
                    rospy.loginfo("Moving head: %f, %f" % (pan_head, tilt_head))
                    self.pan_head_pub.publish(pan_head_trimmed)
                    self.tilt_head_pub.publish(tilt_head_trimmed)

                    # transform to eye frame
                    t.point.y = -t.point.y
                    try:
                        point_eye = self.tfBuffer.transform(t, self.camera_model.tfFrame())
                    except:
                        rospy.loginfo("Transformation gone wrong, dropping")
                        self.status_pub.publish("dropping")
                        return False
                    point_eye = (point_eye.point.x, point_eye.point.y, point_eye.point.z)
                    print "point_eye after head movement: " + str(point_eye)

                    pan_eye = self.pan_eye + math.atan2(point_eye[1], point_eye[0])
                    tilt_eye = self.tilt_eye + math.atan2(-point_eye[2], math.sqrt(math.pow(point_eye[0], 2) + math.pow(point_eye[1], 2)))
                    print "move_eye after head movement: %f, %f" % (tilt_eye, pan_eye)

                    if abs(pan_eye) < self.pan_eye_limit and self.tilt_eye_lower_limit < tilt_eye and tilt_eye < self.tilt_eye_upper_limit:
                        rospy.loginfo("Moving eyes: %f, %f" % (tilt_eye, pan_eye))
                        self.pan_eye_left_pub.publish(pan_eye)
                        self.pan_eye_right_pub.publish(pan_eye)
                        self.tilt_eye_pub.publish(tilt_eye)
                    else:
                        rospy.loginfo("Over eye joint limit even though we moved head, dropping")
                        self.status_pub.publish("dropping")
                        return False
                else:
                    rospy.loginfo("Over eye joint limit and not moving head, dropping")
                    self.status_pub.publish("dropping")
                    return False
            else:
                rospy.loginfo("Not moving eyes, dropping")
                self.status_pub.publish("dropping")
                return False

            if self.shift:
                # shift activity
                self.shift_pub.publish(std_msgs.msg.Empty())

            if not self.recognize:
                return True

            try:
                # object recognition
                ans = self.recognizer(roi)
                label = ans.Label
                certainty = ans.Certainty
                self.label_pub.publish(label)
                rospy.loginfo("Got label %s with certainty %f" % (label, certainty))

                mem_x = pan / (2 * math.pi) * 100
                mem_y = tilt / (2 * math.pi) * 100

                if self.probe:
                    probe_ans = self.probe_coordinate(mem_x, mem_y)
                    if probe_ans.return_value and len(probe_ans.Label) > 0:
                        if probe_ans.Label[0] == label:
                            res = "Approved: %s still at the same place" % label
                        else:
                            res = "Changed: Found %s where %s was before" % (label, probe_ans.Label[0])
                    else:
                        res = "New: Found %s, which wasn't here before" % label
                    res = res + " at %d, %d" % (mem_x, mem_y)
                    rospy.loginfo(res)
                    self.probe_pub.publish(res)
                # store in memory
                self.memory(mem_x, mem_y, label)
                rospy.loginfo("Stored in memory at %d, %d" % (mem_x, mem_y))
            except rospy.ServiceException:
                rospy.loginfo("Recognize or memory service call failed")

        print()
        return True

    def image_callback(self, camera_image):
        self.camera_image = camera_image

    def camera_info_left_callback(self, camera_info_left):
        self.camera_info_left = camera_info_left

    def camera_info_right_callback(self, camera_info_right):
        self.camera_info_right = camera_info_right

    def disparity_callback(self, disparity_image):
        self.disparity_image = disparity_image

    def joint_state_callback(self, joint_state):
        self.pan_eye = joint_state.position[joint_state.name.index("left_eye_pan")]
        self.tilt_eye = joint_state.position[joint_state.name.index("eye_tilt")]
        self.pan_head = -joint_state.position[joint_state.name.index("neck_yaw")]
        self.tilt_head = joint_state.position[joint_state.name.index("neck_pitch")]

    def link_state_callback(self, link_states):
        self.link_states = link_states

    # TODO: rework, adapt to global pan/tilt values
    def look(self, label):
        # ask memory for label
        p = self.probe_label(label.Label)

        if not p.return_value or len(p.X) == 0 or len(p.Y) == 0:
            return False

        x = p.X[0]
        y = p.Y[0]

        pan = x / 100 * (2 * math.pi)
        tilt = y / 100 * (2 * math.pi)

        # adjust camera
        if abs(pan) < self.pan_eye_limit and self.tilt_eye_lower_limit < tilt and tilt < self.tilt_eye_upper_limit:
            self.pan_eye_left_pub.publish(pan)
            self.pan_eye_right_pub.publish(pan)
            self.tilt_eye_pub.publish(tilt)
            return True
        else:
            rospy.loginfo("Cannot look at " + label.Label + ", over eye joint limit and don't know how to move head yet..")
            return False

    def probe(self, value):
        self.probe = value.data
        return (True, 'success')

    def transform(self, value):
        point_new = geometry_msgs.msg.PointStamped()
        point_new.header = value.req.header
        point_new.point = value.req.point
        transformed = self.tfBuffer.transform(point_new, self.camera_model.tfFrame())
        transformed_new = PointStamped()
        transformed_new.header = transformed.header
        transformed_new.point = transformed.point
        return transformed_new
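
The depth lookup in saccade uses camera_model.getZ(disparity), which evaluates the rectified-stereo relation Z = fx * baseline / disparity. A small numeric sketch, reusing the calibration values from the test examples above:

# Sketch of the depth-from-disparity relation behind camera_model.getZ:
#     Z = fx * baseline / disparity
fx = 295.53402059708782              # rectified focal length (px)
baseline = 26.507895206214123 / fx   # B = -Tx / fx, ~0.0897 m
for disparity in (5.0, 13.25, 26.5):
    print(disparity, fx * baseline / disparity)   # ~5.3 m, ~2.0 m, ~1.0 m
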
Example #10
class StereoImageNode(object):
    def __init__(self):

        # Parameters
        self.server = DynamicReconfigureServer(ConfigType,
                                               self.reconfigure_callback)
        self.numDisparities = self.convert_num_disparities(
            rospy.get_param('~numDisparities', 2))
        self.blockSize = self.convert_block_size(
            rospy.get_param('~blockSize', 8))

        # Stereo matcher and model
        self.block_matcher = cv2.StereoBM_create(
            numDisparities=self.numDisparities, blockSize=self.blockSize)
        self.model = StereoCameraModel()
        # TODO sg_block_matcher_ = cv::StereoSGBM::create(1, 1, 10);

        # Subscriber
        self.bridge = CvBridge()
        left_image_sub = message_filters.Subscriber(
            '/kitti/camera_gray_left/image_raw', Image)
        right_image_sub = message_filters.Subscriber(
            '/kitti/camera_gray_right/image_raw', Image)
        left_caminfo_sub = message_filters.Subscriber(
            '/kitti/camera_gray_left/camera_info', CameraInfo)
        right_caminfo_sub = message_filters.Subscriber(
            '/kitti/camera_gray_right/camera_info', CameraInfo)
        ts = message_filters.TimeSynchronizer([
            left_image_sub, right_image_sub, left_caminfo_sub,
            right_caminfo_sub
        ], 10)
        ts.registerCallback(self.callback)

        # Publisher
        self.disparity_image_pub = rospy.Publisher("/kitti/disparity_image",
                                                   Image,
                                                   queue_size=10)
        self.depth_image_pub = rospy.Publisher("/kitti/depth_image",
                                               Image,
                                               queue_size=10)
        self.stereo_pointcloud_pub = rospy.Publisher(
            "/kitti/stereo_pointcloud", PointCloud, queue_size=10)

    def callback(self, left_image, right_image, left_caminfo, right_caminfo):

        self.model.fromCameraInfo(left_caminfo, right_caminfo)

        try:
            left_cv_image = self.bridge.imgmsg_to_cv2(left_image, "mono8")
        except CvBridgeError as e:
            print(e)
            return
        try:
            right_cv_image = self.bridge.imgmsg_to_cv2(right_image, "mono8")
        except CvBridgeError as e:
            print(e)
            return

        disparity = self.processDisparity(left_cv_image, right_cv_image)

        pointcloud = self.processPointCloud(disparity,
                                            left_image.header.frame_id)

    def processDisparity(self, left_image, right_image):

        disparity16 = self.block_matcher.compute(left_image, right_image)
        rospy.loginfo("DISP16 %d %d" %
                      (np.amin(disparity16), np.amax(disparity16)))
        rospy.loginfo(disparity16.dtype)
        # We convert from fixed-point to float disparity and also adjust for any x-offset between
        # the principal points: d = d_fp*inv_dpp - (cx_l - cx_r)
        # rospy.loginfo("%f %f" % (self.model.left.cx(), self.model.right.cx()))
        disparity = np.float32(disparity16) * (1.0 / 16) - (
            self.model.left.cx() - self.model.right.cx())
        rospy.loginfo("DISP   %d %d" %
                      (np.amin(disparity), np.amax(disparity)))
        rospy.loginfo(disparity.dtype)

        disparity8 = (disparity16 + 15).astype(np.uint8)
        rospy.loginfo("DISP8  %d %d" %
                      (np.amin(disparity8), np.amax(disparity8)))
        rospy.loginfo(disparity8.dtype)

        try:
            self.disparity_image_pub.publish(
                self.bridge.cv2_to_imgmsg(disparity8, "mono8"))
        except CvBridgeError as e:
            print(e)

        return disparity

    def processPointCloud(self, disparity, frame_id):

        stereo_pointcloud = PointCloud()
        counter = 0
        min_x = [1000, 0, 0, -1]
        max_x = [-1000, 0, 0, -1]
        min_y = [1000, 0, 0, -1]
        max_y = [-1000, 0, 0, -1]
        min_z = [1000, 0, 0, -1]
        max_z = [-1000, 0, 0, -1]
        depth_image = np.zeros(disparity.shape)
        for v, row in enumerate(disparity):  # v indexes rows (y)
            for u, pixel in enumerate(row):  # u indexes columns (x)
                if pixel > 0:
                    # projectPixelTo3d() expects (u, v) = (column, row)
                    point = self.model.projectPixelTo3d((u, v), pixel)
                    depth = point[2]
                    depth_image[v][u] = depth
                    if depth > 0 and depth < 70:
                        point32 = Point32(point[0], point[1], point[2])
                        stereo_pointcloud.points.append(point32)
                        counter += 1
                        if min_x[0] > point32.x:
                            min_x = [point32.x, u, v, pixel]
                            #print("MINX",u,v,point)
                        if min_y[0] > point32.y:
                            min_y = [point32.y, u, v, pixel]
                            #print("MINY", u,v,point)
                        if min_z[0] > point32.z:
                            min_z = [point32.z, u, v, pixel]
                            #print("MINZ",u,v,point)
                        if max_x[0] < point32.x:
                            max_x = [point32.x, u, v, pixel]
                            #print("MAXX",u,v,point)
                        if max_y[0] < point32.y:
                            max_y = [point32.y, u, v, pixel]
                            #print("MAXY",u,v,point)
                        if max_z[0] < point32.z:
                            max_z = [point32.z, u, v, pixel]
                            #print("MAXZ",u,v,point)
                    #if counter % 10000 == 0:
                    #    print(u,v,pixel,point)
        print("X", min_x, max_x)
        print("Y", min_y, max_y)
        print("Z", min_z, max_z)
        rospy.loginfo("Depth completition rate %f",
                      counter / disparity.shape[0] / disparity.shape[1])
        """
        plt.figure(1)
        plt.subplot(211)
        plt.imshow(disparity, 'gray')
        plt.subplot(212)
        plt.imshow(depth_image, vmax=70)
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()
        """
        depth_image8 = depth_image.astype(np.uint8)
        try:
            self.depth_image_pub.publish(
                self.bridge.cv2_to_imgmsg(depth_image8, "mono8"))
        except CvBridgeError as e:
            print(e)

        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = frame_id
        stereo_pointcloud.header = header
        self.stereo_pointcloud_pub.publish(stereo_pointcloud)

        return stereo_pointcloud
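    # For reference, projectPixelTo3d() triangulates with Z = fx * Tx / d,
    # where Tx is the baseline recovered from the right camera's projection
    # matrix. image_geometry also exposes this relation directly:
    #   z = self.model.getZ(d)          # depth for a given disparity
    #   d = self.model.getDisparity(z)  # disparity for a given depth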

    def reconfigure_callback(self, config, dummy):

        self.numDisparities = self.convert_num_disparities(
            config["numDisparities"])
        self.blockSize = self.convert_block_size(config["blockSize"])
        self.block_matcher = cv2.StereoBM_create(
            numDisparities=self.numDisparities, blockSize=self.blockSize)
        rospy.loginfo(
            'Reconfigure Request: {numDisparities}, {blockSize}'.format(
                **config))
        # Check to see if node should be started or stopped.
        # if self.enable != config["enable"]:
        #     if config["enable"]:
        #         self.start()
        #     else:
        #         self.stop()
        # self.enable = config["enable"]

        # Return the new variables.
        return config

    def convert_num_disparities(self, numDisparities):
        return 16 * numDisparities

    def convert_block_size(self, blockSize):
        return 2 * blockSize + 5
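
The snippet above omits the node's constructor and entry point. A minimal wiring sketch follows, in which the class name StereoProcessor, the package stereo_node, and the config type StereoConfig are assumptions rather than names from the original code:

from dynamic_reconfigure.server import Server
from stereo_node.cfg import StereoConfig  # hypothetical generated config

if __name__ == '__main__':
    rospy.init_node('kitti_stereo_processor')
    processor = StereoProcessor()  # hypothetical class name
    # Server invokes the callback as (config, level), which matches
    # reconfigure_callback(self, config, dummy) above.
    srv = Server(StereoConfig, processor.reconfigure_callback)
    rospy.spin()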
Example #11
        rospy.logdebug(rospy.get_caller_id() + ' -> ROS already initialized')

    frameRate = 15
    slop = 1.0 / frameRate
    cams = StereoCameras("/stereo/left/image_rect",
                         "/stereo/right/image_rect",
                         "/stereo/left/camera_info",
                         "/stereo/right/camera_info",
                         slop=slop)

    camModel = StereoCameraModel()
    topicLeft = rospy.resolve_name("/stereo/left/camera_info")
    msgL = rospy.wait_for_message(topicLeft, CameraInfo, 3)
    topicRight = rospy.resolve_name("/stereo/right/camera_info")
    msgR = rospy.wait_for_message(topicRight, CameraInfo, 3)
    camModel.fromCameraInfo(msgL, msgR)
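    # wait_for_message() raises rospy.ROSException if no CameraInfo arrives
    # within the 3 s timeout, so a missing camera stream fails fast here.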

    tfSynch = CameraSync('/stereo/left/camera_info',
                         topics=[],
                         frames=[
                             'PSM2_psm_base_link', 'PSM2_tool_wrist_link',
                             'PSM2_tool_wrist_caudier_link_shaft'
                         ])
    rate = rospy.Rate(15)  # 15 Hz
    while not rospy.is_shutdown():
        # Get the latest left image
        image = cams.camL.image

        # Wait for images to exist
        if image is None:
            rate.sleep()
            continue
Example #12
def main(psmName):
    rospy.init_node('dvrk_registration', anonymous=True)
    robot = psm(psmName)
    toolOffset = 0.012  # distance from pinching axle to center of orange nub

    frameRate = 15
    slop = 1.0 / frameRate
    cams = StereoCameras("left/image_rect",
                         "right/image_rect",
                         "left/camera_info",
                         "right/camera_info",
                         slop=slop)

    tfSync = CameraSync(
        '/stereo/left/camera_info',
        topics=['/dvrk/' + psmName + '/position_cartesian_current'],
        frames=[
            psmName + '_psm_base_link', psmName + '_tool_wrist_link',
            psmName + '_tool_wrist_caudier_link_shaft'
        ])

    camModel = StereoCameraModel()
    topicLeft = rospy.resolve_name("left/camera_info")
    msgL = rospy.wait_for_message(topicLeft, CameraInfo, 10)
    topicRight = rospy.resolve_name("right/camera_info")
    msgR = rospy.wait_for_message(topicRight, CameraInfo, 10)
    camModel.fromCameraInfo(msgL, msgR)

    # Set up GUI
    scriptDirectory = os.path.dirname(os.path.abspath(__file__))
    filePath = os.path.join(scriptDirectory, '..', '..', 'defaults',
                            'registration_params.yaml')
    print(filePath)
    with open(filePath, 'r') as f:
        data = yaml.safe_load(f)
    if 'H' not in data:
        rospy.logwarn('dVRK Registration: defaults/registration_params.yaml '
                      'empty or malformed. Using defaults for orange tip')
        data = {
            'H': 23,
            'minS': 173,
            'minV': 68,
            'maxV': 255,
            'transform': np.eye(4).tolist()
        }

    cv2.namedWindow(_WINDOW_NAME)
    cv2.createTrackbar('H', _WINDOW_NAME, data['H'], 180, nothingCB)
    cv2.createTrackbar('min S', _WINDOW_NAME, data['minS'], 255, nothingCB)
    cv2.createTrackbar('min V', _WINDOW_NAME, data['minV'], 255, nothingCB)
    cv2.createTrackbar('max V', _WINDOW_NAME, data['maxV'], 255, nothingCB)
    cv2.createTrackbar('masked', _WINDOW_NAME, 0, 1, nothingCB)
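    # These trackbars presumably feed an HSV threshold inside
    # displayRegistration(); a sketch of such a masking step (the +/-10 hue
    # tolerance is an assumption, not from the original code):
    #   hsv = cv2.cvtColor(imageBGR, cv2.COLOR_BGR2HSV)
    #   lower = np.array([data['H'] - 10, data['minS'], data['minV']])
    #   upper = np.array([data['H'] + 10, 255, data['maxV']])
    #   mask = cv2.inRange(hsv, lower, upper)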

    transformOld = np.array(data['transform'])

    # Wait for registration to start
    displayRegistration(cams, camModel, toolOffset, transformOld, tfSync)

    # Main registration
    (pointsA, pointsB) = getRegistrationPoints(robot, cams, camModel,
                                               toolOffset, tfSync)
    tf = calculateRegistration(pointsA, pointsB)

    # Save all parameters to YAML file
    data['transform'] = tf.tolist()
    data['H'] = cv2.getTrackbarPos('H', _WINDOW_NAME)
    data['minS'] = cv2.getTrackbarPos('min S', _WINDOW_NAME)
    data['minV'] = cv2.getTrackbarPos('min V', _WINDOW_NAME)
    data['maxV'] = cv2.getTrackbarPos('max V', _WINDOW_NAME)
    with open(filePath, 'w') as f:
        yaml.dump(data, f)

    # Publish transform as message to camera_transform_pub
    msg = posemath.toMsg(posemath.fromMatrix(tf))
    pub = rospy.Publisher('set_camera_transform',
                          Pose,
                          latch=True,
                          queue_size=10)
    pub.publish(msg)
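    # latch=True keeps this last message available, so subscribers that
    # connect after the one-shot publish still receive the transform.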

    # Show Registration
    displayRegistration(cams, camModel, toolOffset, tf, tfSync, started=True)

    print('Done')
    cv2.destroyAllWindows()
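
calculateRegistration() itself is not shown in any of these examples. A common choice for computing a rigid transform between matched 3D point sets is the SVD-based method of Arun et al.; the sketch below is an assumption about how such a function could look, not the original implementation:

import numpy as np

def calculate_registration_sketch(pointsA, pointsB):
    # Best-fit rotation R and translation t mapping pointsA onto pointsB,
    # both given as Nx3 arrays of matched points.
    A = np.asarray(pointsA, dtype=np.float64)
    B = np.asarray(pointsB, dtype=np.float64)
    centroidA = A.mean(axis=0)
    centroidB = B.mean(axis=0)
    H = (A - centroidA).T.dot(B - centroidB)  # 3x3 cross-covariance
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T.dot(U.T)
    if np.linalg.det(R) < 0:  # correct an improper (reflected) solution
        Vt[2, :] *= -1
        R = Vt.T.dot(U.T)
    t = centroidB - R.dot(centroidA)
    tf = np.eye(4)  # homogeneous 4x4, matching the tf.tolist() usage above
    tf[:3, :3] = R
    tf[:3, 3] = t
    return tf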