Example #1
def allObjectsInsideBin(scene, bin_num):
    global _tflistener
    for i in range(len(scene.objects)):
        poselist_shelf = poseTransform(pose2list(scene.objects[i].pose), 'map', 'shelf', _tflistener)
        if not inside_bin(poselist_shelf[0:3], bin_num):
            return False
        
    return True
Example #2
def allObjectsInsideBin(scene, bin_num):
    global _tflistener
    for i in range(len(scene.objects)):
        poselist_shelf = poseTransform(pose2list(scene.objects[i].pose), 'map',
                                       'shelf', _tflistener)
        if not inside_bin(poselist_shelf[0:3], bin_num):
            return False

    return True
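Examples 1 and 2 are the same helper in two formattings: each object pose is transformed from the 'map' frame into the 'shelf' frame, and the function returns False as soon as any object falls outside the target bin. The inside_bin test itself is project code; a minimal sketch of what such a point-in-box check could look like, assuming axis-aligned bin bounds in the shelf frame, is:

# Hedged sketch only: the real inside_bin and its bound format are project-specific.
def inside_bin_sketch(point, bounds):
    # point: [x, y, z] in the shelf frame
    # bounds: assumed [xmin, xmax, ymin, ymax, zmin, zmax]
    x, y, z = point
    xmin, xmax, ymin, ymax, zmin, zmax = bounds
    return xmin <= x <= xmax and ymin <= y <= ymax and zmin <= z <= zmax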
Example #3
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
    global _tflistener
    global br
    newscene = copy.deepcopy(scene)
    for i in range(len(scene.objects)):
        poselist_capsen_world = poseTransform(pose2list(scene.objects[i].pose), scene_frame_id, 'map', _tflistener)
        cap_T_our = get_obj_capsentf(scene.objects[i].name) # x,y,z,qx,qy,qz,qw
        poselist_world = transformBack(cap_T_our, poselist_capsen_world)  
        newscene.objects[i].pose = poselist2pose(poselist_world)
        
        #pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')
    return newscene
Example #4
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
    global _tflistener
    global br
    newscene = copy.deepcopy(scene)
    for i in range(len(scene.objects)):
        poselist_capsen_world = poseTransform(pose2list(scene.objects[i].pose),
                                              scene_frame_id, 'map',
                                              _tflistener)
        cap_T_our = get_obj_capsentf(
            scene.objects[i].name)  # x,y,z,qx,qy,qz,qw
        poselist_world = transformBack(cap_T_our, poselist_capsen_world)
        newscene.objects[i].pose = poselist2pose(poselist_world)

        #pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')
    return newscene
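Examples 3 and 4 are again one function in two formattings. Both lean on pose2list/poselist2pose to shuttle between geometry_msgs/Pose messages and the flat [x, y, z, qx, qy, qz, qw] lists that poseTransform and transformBack operate on. A hedged sketch of what those converters plausibly do:

# Sketch under the assumption that the flat layout is [x, y, z, qx, qy, qz, qw].
from geometry_msgs.msg import Pose

def pose2list_sketch(pose):
    return [pose.position.x, pose.position.y, pose.position.z,
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w]

def poselist2pose_sketch(poselist):
    pose = Pose()
    (pose.position.x, pose.position.y, pose.position.z,
     pose.orientation.x, pose.orientation.y, pose.orientation.z,
     pose.orientation.w) = poselist
    return pose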
Example #5
def main(argv=None):
    if argv is None:
        argv = sys.argv
        
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(1)
    
    calibtag = 'link6tag1'
    if len(argv) == 2:
        calibtag = argv[1]
    
    # kinect calib joints
    # j1: 9.93
    # j2: 39.46
    # j3: -32.74
    # j4: 69.54
    # j5: -11.26
    # j6: -70.88
    #link6tag1
    #link6tag3
    #link6tag1
    #link6tag3
    # j1: -16.98
    # j2: 65.55
    # j3: -13.48
    # j4: -21.15
    # j5: -51.26
    # j6: 10.63

    #link6tag1
    #link6tag3
    
    #filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/camera_extrinsic_calib_data/camera_extrinsic_calib_data.json'
    if calibtag == 'link2tag':
        tag_link_transform = [0, 0.7, -0.2145-0.005, -math.pi, 0, 0]  
        link_frame_id = 'link_2'
    elif calibtag == 'linkbasetag':
        print 'move to q =', (np.array([2.35,-67.73,62.39,-154.9,-30.11,11]) / 180 * math.pi).tolist()
        print 'move to q =', (np.array([2.35,-77.73,72.39,-154.9,-20.11,11]) / 180 * math.pi).tolist()
        tag_link_transform = [0.5291, 0.4428, 0.005, math.pi, 0, 0]  
        link_frame_id = 'base_link'
    elif calibtag == 'link6tag1':
        #print 'move to q =', (np.array([9.93,39.37,-30.63,60.64,-12.12,-61.79]) / 180 * math.pi).tolist()
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag2':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, 0]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag3':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, -math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag4':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi]  
        link_frame_id = 'link_6'
        
    # visualize it
    pubFrame(br, pose=tag_link_transform, frame_id='tag', parent_frame_id=link_frame_id, npub=10)
    
    
    tag_map_transform = poseTransform(tag_link_transform, link_frame_id, 'map', listener)
    
    apriltag_topic = 'tag_detections' 
    while True:
        tagdetect = ROS_Wait_For_Msg(apriltag_topic, AprilTagDetectionArray).getmsg() 
        if len(tagdetect.detections) == 1:
            tag_kinect_transform = pose2list(tagdetect.detections[0].pose.pose)
            break
    
    tag_kinect_tfm_mat = matrix_from_xyzquat(translate=tag_kinect_transform[0:3], 
                                                     quaternion=tag_kinect_transform[3:7]) 
    tag_map_tfm_mat = matrix_from_xyzquat(translate=tag_map_transform[0:3], 
                                                     quaternion=tag_map_transform[3:7]) 
    
    kinect_map_tfm_mat = np.dot(tag_map_tfm_mat, np.linalg.inv(tag_kinect_tfm_mat))
    
    kinect_map_pose = tfm.translation_from_matrix(kinect_map_tfm_mat).tolist() + tfm.quaternion_from_matrix(kinect_map_tfm_mat).tolist()
    

    link6tag = ['link6tag1', 'link6tag2', 'link6tag3', 'link6tag4']
    if calibtag in link6tag:
        print kinect_map_pose
        pubFrame(br, pose=kinect_map_pose, frame_id='new_kinect_pose', parent_frame_id='map', npub=10)
    else:
        realsense_link5_pose = poseTransform(kinect_map_pose, link_frame_id, 'link_5', listener)
        pubFrame(br, pose=realsense_link5_pose, frame_id='new_realsense_pose', parent_frame_id='link_5', npub=10)
        
        print realsense_link5_pose
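The heart of this calibration routine is one line of frame algebra: with the tag pose known both in the map frame (via the robot's forward kinematics) and in the camera frame (via the AprilTag detection), the camera pose in the map frame is map_T_kinect = map_T_tag * inv(kinect_T_tag). A self-contained sketch of that composition with numpy and tf.transformations (matrix_from_xyzquat is project code, so an equivalent is rebuilt here):

import numpy as np
import tf.transformations as tfm

def xyzquat_to_matrix(xyzquat):
    # 4x4 homogeneous matrix from a flat [x, y, z, qx, qy, qz, qw] pose.
    mat = tfm.quaternion_matrix(xyzquat[3:7])
    mat[0:3, 3] = xyzquat[0:3]
    return mat

def camera_pose_in_map(tag_map_transform, tag_camera_transform):
    tag_map = xyzquat_to_matrix(tag_map_transform)        # map_T_tag
    tag_camera = xyzquat_to_matrix(tag_camera_transform)  # camera_T_tag
    camera_map = np.dot(tag_map, np.linalg.inv(tag_camera))
    return (tfm.translation_from_matrix(camera_map).tolist() +
            tfm.quaternion_from_matrix(camera_map).tolist())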
Example #6
def main(argv=None):
    if argv is None:
        argv = sys.argv
    
    parser = optparse.OptionParser()
    
    #import socket
    #useObject = not (socket.gethostname() == 'mcube-002' or socket.gethostname() == 'mcube-003')
    
    parser.add_option('-o', '--obj', action="store_true", dest='useObject', help='To use manual object fitting', 
                      default=False)
                      
    parser.add_option('-c', '--cammarker', action="store_true", dest='useCamMarker', help='To use manual camera fitting or not', 
                      default=False)
                      
    parser.add_option('-s', '--shelfmarker', action="store_true", dest='useShelfMarker', help='To use manual shelf fitting or not', 
                      default=False)
                      
    parser.add_option('-a', '--all', action="store_true", dest='useAllMarker', help='To use manual fitting or not', 
                      default=False)
    
    
    rospy.init_node("manual_fit_server")
    
    global currentExpoPose
    global currentCrayolaPose
    global currentKinectPose
    global currentShelfPose
    global currentJoint_realsense
    global currentJoint_realsense_link5
    global brKinect
    global brShelf
    global nkinect
    global vis_pub
    global opt
    global pyramid_marker
    
    (opt, args) = parser.parse_args()
    currentExpoPose = Pose()
    currentCrayolaPose = Pose()
    currentKinectPose = [Pose(),Pose()]
    currentShelfPose = Pose()
    currentJoint_realsense = Pose()
    brKinect = TransformBroadcaster()
    brShelf = TransformBroadcaster()
    
    global listener
    listener = tf.TransformListener()
    rospy.sleep(1)
    # create an interactive marker server on the topic namespace simple_marker
    server = InteractiveMarkerServer("affordance_marker")
    
    # on mcube-002 don't use object
    
    if opt.useObject or opt.useAllMarker:
        # create an interactive marker for Expo
        pose = [1.58220779896, 0.296458333731, 1.12021064758, -0.00197346811183, -0.738883018494, 0.00179956667125, 0.673828423023]
        int_marker = createInteractiveMarker('ExpoEraser', *pose)
        
        meshmarker = createMeshMarker('package://apc_config/models/object_meshes/expo_dry_erase_board_eraser.stl', 
                                  offset=tuple(pose[0:3]), rgba=(0,1,0,1),
                                  orientation=tuple(pose[3:7]))
        int_marker.controls.append(meshmarker)
        int_marker.controls.extend(createMoveControls())
        server.insert(int_marker, processFeedback)
        
        # create an interactive marker for Crayola
        pose = [1.63922035694, -0.273455768824, 1.12480783463, -0.0384246744215, 0.697018921375, -0.715074837208, 0.0368278548121]
        int_marker = createInteractiveMarker('Crayola', *pose)
        meshmarker = createMeshMarker('package://apc_config/models/object_meshes/crayola_64_ct.stl', 
                                  offset=tuple(pose[0:3]), rgba=(1,0,0,1),
                                  orientation=tuple(pose[3:7]))
        int_marker.controls.append(meshmarker)
        int_marker.controls.extend(createMoveControls())
        server.insert(int_marker, processFeedback)

    # create an interactive marker for Shelf
    currentShelfPose.position.x = rospy.get_param('shelf_pose/x')
    currentShelfPose.position.y = rospy.get_param('shelf_pose/y')
    currentShelfPose.position.z = rospy.get_param('shelf_pose/z')
    currentShelfPose.orientation.x = rospy.get_param('shelf_pose/qx')
    currentShelfPose.orientation.y = rospy.get_param('shelf_pose/qy')
    currentShelfPose.orientation.z = rospy.get_param('shelf_pose/qz')
    currentShelfPose.orientation.w = rospy.get_param('shelf_pose/qw')
    
    int_marker = createInteractiveMarker('Shelf', currentShelfPose.position.x, currentShelfPose.position.y, currentShelfPose.position.z,
             currentShelfPose.orientation.x, currentShelfPose.orientation.y, currentShelfPose.orientation.z, currentShelfPose.orientation.w)
    meshmarker = createMeshMarker('package://apc_config/models/kiva_pod/meshes/pod_lowres.stl', 
                              offset=(currentShelfPose.position.x, currentShelfPose.position.y, currentShelfPose.position.z), rgba=(0,0,1,0.5),
                              orientation=(currentShelfPose.orientation.x, currentShelfPose.orientation.y, currentShelfPose.orientation.z, currentShelfPose.orientation.w))
    int_marker.controls.append(meshmarker)
    if opt.useShelfMarker or opt.useAllMarker:
        int_marker.controls.extend(createMoveControls())
    server.insert(int_marker, processFeedback)
    rospy.Timer(rospy.Duration(0.01), frameCallbackShelf)
    
    # create an interactive marker for Kinects
    nkinect = 2
    
    # old manual fit kinect pose
    #pose = [[0.305660784245, -0.883868515491, 1.66722476482, -0.244159132242, 0.827043175697, 0.0950953885913, 0.497335940599],
    #        [0.367319256067, 0.855090618134, 1.78848266602, 0.800817668438, -0.247087046504, 0.528468191624, 0.135500892997]]
            
    #pose = [[0.2643078824103202, -0.8475994497881857, 1.5983246933067445, -0.23088847539406132, 0.822983396811164, 0.08995383249481428, 0.511169994763495],
            #[0.3659496377911322, 0.8087564208783671, 1.779781933883937, 0.8065800933146582, -0.24219685501387836, 0.5258798075015827, 0.11924245508274978]]
            
    # pose  = [[0.2615747933866079, -0.847265732303331, 1.5924661390467871, -0.22946772410305405, 0.8209085501266424, 0.08173898334984725, 0.5164978476596324],
            # [0.35734815800402436, 0.809898201970986, 1.7640327819976598, 0.8036871694243676, -0.2417794937981231, 0.5299934028148222, 0.12138698747903716]]
    pose  = [[0.254395043470156, -0.849313121283084, 1.5914400427256015, -0.2499411509851439, 0.8071675924380618, 0.11813937247844475, 0.5215836382491776],
            [0.3581305540948658, 0.8062610720617674, 1.7711358584054768, 0.8090512113331657, -0.2868798404832606, 0.494998046288383, 0.13458167753046513]]

    for i in range(nkinect):
        currentKinectPose[i] = poselist2pose( pose[i] )
        
        if opt.useCamMarker or opt.useAllMarker:
            int_marker = createInteractiveMarker('Kinect%d' % (i+1), *pose[i])
            int_marker.controls.extend(createMoveControls(fixed=True))
            server.insert(int_marker, processFeedback)

    #pose = [-0.0246461518109, -0.000698581337929, 0.0109369456768, 0.706081748009, 0.706393122673, -0.0283508431166, 0.04066388309]
    #pose = [-0.030973277986, 0.0219592899084, -0.0248876661062, 0.702517032623, 0.711110830307, 0.0224311985075, 0.0169796515256]
    #pose = [-0.0141396149993, 0.0219592899084, -0.0248876661062, 0.70285320282, 0.71131336689, 0.00553341582417, -0.000123182311654]
    #pose = [-0.0186747914648, 0.00628428290067, -0.0175085197216, 0.702853216106, 0.711313378682, 0.00553342037479, -0.0001231898954]
    #pose = [-0.0191506993199, 0.0277181266145, -0.0117737966167, 0.70285321606, 0.711313378699, 0.00553342388632, -0.000123194876927]
    pose = [-0.023043141477, 0.0275422775115, -0.0114395739518, 0.70285321606, 0.711313378699, 0.00553342388632, -0.000123194876927]
    pose_world = poseTransform(pose, 'link_5', 'map', listener)
    currentJoint_realsense = poselist2pose( pose_world )
    currentJoint_realsense_link5 = poselist2pose( pose )

    if opt.useCamMarker or opt.useAllMarker:
        int_marker = createInteractiveMarker('joint_realsense', *pose_world, frame_id='map')
        # cubemarker = createCubeMarker(offset=tuple(pose[0:3]), rgba=(1,0,1,0.5),
                              # orientation=tuple(pose[3:7]),
                              # scale=(0.01,0.01,0.01))
        #int_marker.controls.append(cubemarker)
        viewMarker = createViewMarker(frame_id='realsense_depth_optical_frame')
        #points = [[0.15748027363422223, 0.08267716536239379, 0.2], [-0.15748027363422223, 0.08267716536239379, 0.2], [0.15748027363422223, -0.08267716536239379, 0.2], [-0.15748027363422223, -0.08267716536239379, 0.2] ]
        # meshmarker = createMeshMarker('package://apc_config/models/object_meshes/pyramid.stl', 
                      # offset=tuple([0,0,0]), rgba=(1,1,0,0.1),
                      # orientation=tuple([0,0,0,1]), frame_id='realsense_depth_optical_frame', scale = 0.001)
        # int_marker.controls.append(meshmarker)
        # 
        # meshmarker = createMeshMarker('package://apc_config/models/object_meshes/pyramid.stl', 
                      # offset=tuple([0,0,0]), rgba=(0,1,0,0.5),
                      # orientation=tuple([0,0,0,1]), frame_id='realsense_depth_optical_frame', scale = 0.002)
        # int_marker.controls.append(meshmarker)
        
        
        int_marker.controls.extend(createMoveControls(fixed=True))
        server.insert(int_marker, processFeedback)
        
        vis_pub = rospy.Publisher('visualization_marker', Marker, queue_size=10)

        pyramid_marker = createMeshMarker('package://apc_config/models/object_meshes/pyramid.stl', 
                      offset=tuple([0,0,0]), rgba=(1,1,0,0.1),
                      orientation=tuple([0,0,0,1]), frame_id='realsense_depth_optical_frame', scale = 0.002, scales = [2, 2,1]).markers[0]
    
    rospy.Timer(rospy.Duration(0.01), frameCallbackKinect)

    server.applyChanges()
    
    # ros services
    startPoseService()
    
    rospy.spin()
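createInteractiveMarker, createMeshMarker, and createMoveControls are project helpers, but the pattern they wrap is the stock interactive_markers API: build an InteractiveMarker message, attach controls, insert it into the server with a feedback callback, then applyChanges(). A minimal hedged sketch using only standard types:

from visualization_msgs.msg import InteractiveMarker, InteractiveMarkerControl

def make_marker_sketch(name, x, y, z, qx, qy, qz, qw, frame_id='map'):
    # Sketch only; the project helpers also add mesh visuals and full 6-DOF controls.
    int_marker = InteractiveMarker()
    int_marker.header.frame_id = frame_id
    int_marker.name = name
    (int_marker.pose.position.x, int_marker.pose.position.y,
     int_marker.pose.position.z) = (x, y, z)
    (int_marker.pose.orientation.x, int_marker.pose.orientation.y,
     int_marker.pose.orientation.z,
     int_marker.pose.orientation.w) = (qx, qy, qz, qw)
    control = InteractiveMarkerControl()
    control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
    control.orientation.w = 1  # move along the control's x axis
    int_marker.controls.append(control)
    return int_marker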
Example #7
def main(argv=None):
    if argv is None:
        argv = sys.argv
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(1)
    
    calibtag = 'link6tag1'
    if len(argv) == 2:
        calibtag = argv[1]
    
    #filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/camera_extrinsic_calib_data/camera_extrinsic_calib_data.json'
    if calibtag == 'link2tag':
        tag_link_transform = [0, 0.7, -0.2145-0.005, -math.pi, 0, 0]  
        link_frame_id = 'link_2'
    elif calibtag == 'realsense1':
        print 'move to q =', (np.array([ 0.0, -23.02, 34.73, 154.93, -25.15, 0.0]) / 180 * math.pi).tolist()
        
        r = 0.1923/2
        tag_link_transform = [0.4033-0.230 -r, -1.121/2 + 0.044 + r, 0.005, 0, 0, 0]  
        link_frame_id = 'base_link'
    elif calibtag == 'link6tag1':
        #print 'move to q =', (np.array([9.93,39.37,-30.63,60.64,-12.12,-61.79]) / 180 * math.pi).tolist()
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag2':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, 0]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag3':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, -math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag4':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi]  
        link_frame_id = 'link_6'
        
    # visualize it
    pubFrame(br, pose=tag_link_transform, frame_id='tag', parent_frame_id=link_frame_id, npub=10)
    
    
    tag_map_transform = poseTransform(tag_link_transform, link_frame_id, 'map', listener)
    
    apriltag_topic = '/pr_apriltags/detections' 
    print 'Wait for apriltag detection'
    tag_camera_transforms = []
    for i in xrange(5):
        while True:
            tagdetect = ROS_Wait_For_Msg(apriltag_topic, AprilTagDetections).getmsg() 
            if len(tagdetect.detections) == 1:
                tag_camera_transform = pose2list(tagdetect.detections[0].pose)
                tag_camera_transforms.append(tag_camera_transform)
                break
    
    tag_camera_transform = np.average(np.array(tag_camera_transforms), axis = 0)
    
    tag_camera_tfm_mat = matrix_from_xyzquat(tag_camera_transform) 
    tag_map_tfm_mat = matrix_from_xyzquat(tag_map_transform) 
    
    camera_map_tfm_mat = np.dot(tag_map_tfm_mat, np.linalg.inv(tag_camera_tfm_mat))
    
    camera_map_pose = tfm.translation_from_matrix(camera_map_tfm_mat).tolist() + tfm.quaternion_from_matrix(camera_map_tfm_mat).tolist()
    

    link6tag = ['link6tag1', 'link6tag2', 'link6tag3', 'link6tag4']
    if calibtag in link6tag:
        print camera_map_pose
        pubFrame(br, pose=camera_map_pose, frame_id='new_kinect_pose', parent_frame_id='map', npub=10)
    elif calibtag == 'realsense1':
        realsense_link5_pose = poseTransform(camera_map_pose, link_frame_id, 'link_5', listener)
        pubFrame(br, pose=realsense_link5_pose, frame_id='new_realsense_pose', parent_frame_id='link_5', npub=10)
        
        print realsense_link5_pose
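Two details are worth noting here. First, np.average over the stacked [x, y, z, qx, qy, qz, qw] rows averages quaternion components directly, which is only a reasonable approximation when the five detections are nearly identical, as they should be for a static tag. Second, ROS_Wait_For_Msg is a project helper; the stock rospy equivalent of its blocking single-message read is wait_for_message. A sketch (the AprilTagDetections import path is an assumption):

import rospy
from pr_apriltags.msg import AprilTagDetections  # assumed package for the message type used above

def get_single_tag_pose(topic='/pr_apriltags/detections'):
    # Block until exactly one tag is visible, then return its pose.
    while not rospy.is_shutdown():
        msg = rospy.wait_for_message(topic, AprilTagDetections)
        if len(msg.detections) == 1:
            return msg.detections[0].pose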
Example #8
def _detectOneObject(obj_id, bin_num, kinect_num):
    global _detect_one_object_srv
    global br
    
    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num, kinect_num)
    if pc is None:
        return (None, None)
    
    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections
    
    
    # 2. prepare constraints
    
    bin_cnstr = get_bin_cnstr()[bin_num] # bin walls: [right, left, back, front, bottom, top]
    ccnstr = []
    
    tol = 0.9  # larger is more strict
    (trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.2 left
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.3 back
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.4 front
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.5 floor
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.6 top
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]-floor_thick/2], trans,rot, pc.header.frame_id), tol, bin_num) )
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    # 
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]
        
        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
                # string name
                # geometry_msgs/Pose pose
                # float32 score
                # float32[] score_components
            if len(ret.detections)>0:
                print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(pose2list(ret.detections[i].pose), pc.header.frame_id, 'map', _tflistener)
                    
                    cap_T_our = get_obj_capsentf(obj_id) # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(cap_T_our, poselist_capsen_world)    # transform to our desired pose
                    
                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map', 'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfy hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encounters errors:', traceback.format_exc()
            return (None, None)
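The service proxy itself is a module-level global in this file; a hedged sketch of how it would typically be set up (the capsen_vision service type is inferred from the DetectOneObjectRequest used above):

import rospy
from capsen_vision.srv import DetectOneObject  # assumed import path

_detect_one_object_srv = rospy.ServiceProxy('/detection_service/detect_one_object',
                                            DetectOneObject)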
Example #9
def _detectOneObject(obj_id, bin_num, kinect_num):
    global _detect_one_object_srv
    global br

    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num,
                                                  kinect_num)
    if pc is None:
        return (None, None)

    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections

    # 2. prepare constraints

    bin_cnstr = get_bin_cnstr()[bin_num]  # bin walls: [right, left, back, front, bottom, top]
    ccnstr = []

    tol = 0.9  # larger is more strict
    (trans, rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([1, 0, 0], [bin_cnstr[0], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.2 left
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([-1, 0, 0], [bin_cnstr[1], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.3 back
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 1, 0], [0, bin_cnstr[2], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.4 front
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, -1, 0], [0, bin_cnstr[3], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.5 floor
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.6 top
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, -1], [0, 0, bin_cnstr[5]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.SUPPORTING_PLANE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4] - floor_thick / 2],
                           trans, rot, pc.header.frame_id), tol, bin_num))
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    #
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]

        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
            # string name
            # geometry_msgs/Pose pose
            # float32 score
            # float32[] score_components
            if len(ret.detections) > 0:
                print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(
                        pose2list(ret.detections[i].pose), pc.header.frame_id,
                        'map', _tflistener)

                    cap_T_our = get_obj_capsentf(obj_id)  # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(
                        cap_T_our,
                        poselist_capsen_world)  # transform to our desired pose

                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map',
                                                   'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfy hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encounters errors:', traceback.format_exc()
            return (None, None)
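transformPlane is also project code, but the underlying geometry is standard: a plane expressed as a normal vector and a point on the plane maps through a rigid transform by rotating the normal and rotating-plus-translating the point. A hedged numpy sketch (the real helper's argument order and return format are assumptions):

import numpy as np
import tf.transformations as tfm

def transform_plane_sketch(normal, point, trans, rot):
    # trans: [x, y, z] translation; rot: [qx, qy, qz, qw] quaternion.
    R = tfm.quaternion_matrix(rot)[0:3, 0:3]
    new_normal = np.dot(R, normal)
    new_point = np.dot(R, point) + np.asarray(trans)
    return new_normal.tolist(), new_point.tolist()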
Example #10
def main(argv=None):
    if argv is None:
        argv = sys.argv

    parser = optparse.OptionParser()

    #import socket
    #useObject = not (socket.gethostname() == 'mcube-002' or socket.gethostname() == 'mcube-003')

    parser.add_option('-o',
                      '--obj',
                      action="store_true",
                      dest='useObject',
                      help='To use manual object fitting',
                      default=False)

    parser.add_option('-c',
                      '--cammarker',
                      action="store_true",
                      dest='useCamMarker',
                      help='To use manual camera fitting or not',
                      default=False)

    parser.add_option('-s',
                      '--shelfmarker',
                      action="store_true",
                      dest='useShelfMarker',
                      help='To use manual shelf fitting or not',
                      default=False)

    parser.add_option('-a',
                      '--all',
                      action="store_true",
                      dest='useAllMarker',
                      help='To use manual fitting or not',
                      default=False)

    rospy.init_node("manual_fit_server")

    global currentExpoPose
    global currentCrayolaPose
    global currentKinectPose
    global currentShelfPose
    global currentJoint_realsense
    global currentJoint_realsense_link5
    global brKinect
    global brShelf
    global nkinect
    global vis_pub
    global opt
    global pyramid_marker

    (opt, args) = parser.parse_args()
    currentExpoPose = Pose()
    currentCrayolaPose = Pose()
    currentKinectPose = [Pose(), Pose()]
    currentShelfPose = Pose()
    currentJoint_realsense = Pose()
    brKinect = TransformBroadcaster()
    brShelf = TransformBroadcaster()

    global listener
    listener = tf.TransformListener()
    rospy.sleep(1)
    # create an interactive marker server on the topic namespace simple_marker
    server = InteractiveMarkerServer("affordance_marker")

    # on mcube-002 don't use object

    if opt.useObject or opt.useAllMarker:
        # create an interactive marker for Expo
        pose = [
            1.58220779896, 0.296458333731, 1.12021064758, -0.00197346811183,
            -0.738883018494, 0.00179956667125, 0.673828423023
        ]
        int_marker = createInteractiveMarker('ExpoEraser', *pose)

        meshmarker = createMeshMarker(
            'package://apc_config/models/object_meshes/expo_dry_erase_board_eraser.stl',
            offset=tuple(pose[0:3]),
            rgba=(0, 1, 0, 1),
            orientation=tuple(pose[3:7]))
        int_marker.controls.append(meshmarker)
        int_marker.controls.extend(createMoveControls())
        server.insert(int_marker, processFeedback)

        # create an interactive marker for Crayola
        pose = [
            1.63922035694, -0.273455768824, 1.12480783463, -0.0384246744215,
            0.697018921375, -0.715074837208, 0.0368278548121
        ]
        int_marker = createInteractiveMarker('Crayola', *pose)
        meshmarker = createMeshMarker(
            'package://apc_config/models/object_meshes/crayola_64_ct.stl',
            offset=tuple(pose[0:3]),
            rgba=(1, 0, 0, 1),
            orientation=tuple(pose[3:7]))
        int_marker.controls.append(meshmarker)
        int_marker.controls.extend(createMoveControls())
        server.insert(int_marker, processFeedback)

    # create an interactive marker for Shelf
    currentShelfPose.position.x = rospy.get_param('shelf_pose/x')
    currentShelfPose.position.y = rospy.get_param('shelf_pose/y')
    currentShelfPose.position.z = rospy.get_param('shelf_pose/z')
    currentShelfPose.orientation.x = rospy.get_param('shelf_pose/qx')
    currentShelfPose.orientation.y = rospy.get_param('shelf_pose/qy')
    currentShelfPose.orientation.z = rospy.get_param('shelf_pose/qz')
    currentShelfPose.orientation.w = rospy.get_param('shelf_pose/qw')

    int_marker = createInteractiveMarker(
        'Shelf', currentShelfPose.position.x, currentShelfPose.position.y,
        currentShelfPose.position.z, currentShelfPose.orientation.x,
        currentShelfPose.orientation.y, currentShelfPose.orientation.z,
        currentShelfPose.orientation.w)
    meshmarker = createMeshMarker(
        'package://apc_config/models/kiva_pod/meshes/pod_lowres.stl',
        offset=(currentShelfPose.position.x, currentShelfPose.position.y,
                currentShelfPose.position.z),
        rgba=(0, 0, 1, 0.5),
        orientation=(currentShelfPose.orientation.x,
                     currentShelfPose.orientation.y,
                     currentShelfPose.orientation.z,
                     currentShelfPose.orientation.w))
    int_marker.controls.append(meshmarker)
    if opt.useShelfMarker or opt.useAllMarker:
        int_marker.controls.extend(createMoveControls())
    server.insert(int_marker, processFeedback)
    rospy.Timer(rospy.Duration(0.01), frameCallbackShelf)

    # create an interactive marker for Kinects
    nkinect = 2

    # old manual fit kinect pose
    #pose = [[0.305660784245, -0.883868515491, 1.66722476482, -0.244159132242, 0.827043175697, 0.0950953885913, 0.497335940599],
    #        [0.367319256067, 0.855090618134, 1.78848266602, 0.800817668438, -0.247087046504, 0.528468191624, 0.135500892997]]

    #pose = [[0.2643078824103202, -0.8475994497881857, 1.5983246933067445, -0.23088847539406132, 0.822983396811164, 0.08995383249481428, 0.511169994763495],
    #[0.3659496377911322, 0.8087564208783671, 1.779781933883937, 0.8065800933146582, -0.24219685501387836, 0.5258798075015827, 0.11924245508274978]]

    # pose  = [[0.2615747933866079, -0.847265732303331, 1.5924661390467871, -0.22946772410305405, 0.8209085501266424, 0.08173898334984725, 0.5164978476596324],
    # [0.35734815800402436, 0.809898201970986, 1.7640327819976598, 0.8036871694243676, -0.2417794937981231, 0.5299934028148222, 0.12138698747903716]]
    pose = [[
        0.254395043470156, -0.849313121283084, 1.5914400427256015,
        -0.2499411509851439, 0.8071675924380618, 0.11813937247844475,
        0.5215836382491776
    ],
            [
                0.3581305540948658, 0.8062610720617674, 1.7711358584054768,
                0.8090512113331657, -0.2868798404832606, 0.494998046288383,
                0.13458167753046513
            ]]

    for i in range(nkinect):
        currentKinectPose[i] = poselist2pose(pose[i])

        if opt.useCamMarker or opt.useAllMarker:
            int_marker = createInteractiveMarker('Kinect%d' % (i + 1),
                                                 *pose[i])
            int_marker.controls.extend(createMoveControls(fixed=True))
            server.insert(int_marker, processFeedback)

    #pose = [-0.0246461518109, -0.000698581337929, 0.0109369456768, 0.706081748009, 0.706393122673, -0.0283508431166, 0.04066388309]
    #pose = [-0.030973277986, 0.0219592899084, -0.0248876661062, 0.702517032623, 0.711110830307, 0.0224311985075, 0.0169796515256]
    #pose = [-0.0141396149993, 0.0219592899084, -0.0248876661062, 0.70285320282, 0.71131336689, 0.00553341582417, -0.000123182311654]
    #pose = [-0.0186747914648, 0.00628428290067, -0.0175085197216, 0.702853216106, 0.711313378682, 0.00553342037479, -0.0001231898954]
    #pose = [-0.0191506993199, 0.0277181266145, -0.0117737966167, 0.70285321606, 0.711313378699, 0.00553342388632, -0.000123194876927]
    pose = [
        -0.023043141477, 0.0275422775115, -0.0114395739518, 0.70285321606,
        0.711313378699, 0.00553342388632, -0.000123194876927
    ]
    pose_world = poseTransform(pose, 'link_5', 'map', listener)
    currentJoint_realsense = poselist2pose(pose_world)
    currentJoint_realsense_link5 = poselist2pose(pose)

    if opt.useCamMarker or opt.useAllMarker:
        int_marker = createInteractiveMarker('joint_realsense',
                                             *pose_world,
                                             frame_id='map')
        # cubemarker = createCubeMarker(offset=tuple(pose[0:3]), rgba=(1,0,1,0.5),
        # orientation=tuple(pose[3:7]),
        # scale=(0.01,0.01,0.01))
        #int_marker.controls.append(cubemarker)
        viewMarker = createViewMarker(frame_id='realsense_depth_optical_frame')
        #points = [[0.15748027363422223, 0.08267716536239379, 0.2], [-0.15748027363422223, 0.08267716536239379, 0.2], [0.15748027363422223, -0.08267716536239379, 0.2], [-0.15748027363422223, -0.08267716536239379, 0.2] ]
        # meshmarker = createMeshMarker('package://apc_config/models/object_meshes/pyramid.stl',
        # offset=tuple([0,0,0]), rgba=(1,1,0,0.1),
        # orientation=tuple([0,0,0,1]), frame_id='realsense_depth_optical_frame', scale = 0.001)
        # int_marker.controls.append(meshmarker)
        #
        # meshmarker = createMeshMarker('package://apc_config/models/object_meshes/pyramid.stl',
        # offset=tuple([0,0,0]), rgba=(0,1,0,0.5),
        # orientation=tuple([0,0,0,1]), frame_id='realsense_depth_optical_frame', scale = 0.002)
        # int_marker.controls.append(meshmarker)

        int_marker.controls.extend(createMoveControls(fixed=True))
        server.insert(int_marker, processFeedback)

        vis_pub = rospy.Publisher('visualization_marker',
                                  Marker,
                                  queue_size=10)

        pyramid_marker = createMeshMarker(
            'package://apc_config/models/object_meshes/pyramid.stl',
            offset=tuple([0, 0, 0]),
            rgba=(1, 1, 0, 0.1),
            orientation=tuple([0, 0, 0, 1]),
            frame_id='realsense_depth_optical_frame',
            scale=0.002,
            scales=[2, 2, 1]).markers[0]

    rospy.Timer(rospy.Duration(0.01), frameCallbackKinect)

    server.applyChanges()

    # ros services
    startPoseService()

    rospy.spin()
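frameCallbackShelf and frameCallbackKinect are not shown in these examples, but the usual job of a 100 Hz rospy.Timer callback in this setup is to re-broadcast the current marker poses on tf. A sketch, assuming the globals initialized in main:

def frame_callback_shelf_sketch(event):
    # Hedged sketch: rebroadcast the shelf marker pose as the 'shelf' frame.
    p = currentShelfPose
    brShelf.sendTransform(
        (p.position.x, p.position.y, p.position.z),
        (p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w),
        rospy.Time.now(), 'shelf', 'map')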
Example #11
def main(argv=None):
    if argv is None:
        argv = sys.argv

    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(1)

    calibtag = 'link6tag1'
    if len(argv) == 2:
        calibtag = argv[1]

    # kinect calib joints
    # j1: 9.93
    # j2: 39.46
    # j3: -32.74
    # j4: 69.54
    # j5: -11.26
    # j6: -70.88
    #link6tag1
    #link6tag3
    #link6tag1
    #link6tag3
    # j1: -16.98
    # j2: 65.55
    # j3: -13.48
    # j4: -21.15
    # j5: -51.26
    # j6: 10.63

    #link6tag1
    #link6tag3

    #filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/camera_extrinsic_calib_data/camera_extrinsic_calib_data.json'
    if calibtag == 'link2tag':
        tag_link_transform = [0, 0.7, -0.2145 - 0.005, -math.pi, 0, 0]
        link_frame_id = 'link_2'
    elif calibtag == 'linkbasetag':
        print 'move to q =', (
            np.array([2.35, -67.73, 62.39, -154.9, -30.11, 11]) / 180 *
            math.pi).tolist()
        print 'move to q =', (
            np.array([2.35, -77.73, 72.39, -154.9, -20.11, 11]) / 180 *
            math.pi).tolist()
        tag_link_transform = [0.5291, 0.4428, 0.005, math.pi, 0, 0]
        link_frame_id = 'base_link'
    elif calibtag == 'link6tag1':
        #print 'move to q =', (np.array([9.93,39.37,-30.63,60.64,-12.12,-61.79]) / 180 * math.pi).tolist()
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0,
            math.pi / 2
        ]
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag2':
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, 0
        ]
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag3':
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0,
            -math.pi / 2
        ]
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag4':
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi
        ]
        link_frame_id = 'link_6'

    # visualize it
    pubFrame(br,
             pose=tag_link_transform,
             frame_id='tag',
             parent_frame_id=link_frame_id,
             npub=10)

    tag_map_transform = poseTransform(tag_link_transform, link_frame_id, 'map',
                                      listener)

    apriltag_topic = 'tag_detections'
    while True:
        tagdetect = ROS_Wait_For_Msg(apriltag_topic,
                                     AprilTagDetectionArray).getmsg()
        if len(tagdetect.detections) == 1:
            tag_kinect_transform = pose2list(tagdetect.detections[0].pose.pose)
            break

    tag_kinect_tfm_mat = matrix_from_xyzquat(
        translate=tag_kinect_transform[0:3],
        quaternion=tag_kinect_transform[3:7])
    tag_map_tfm_mat = matrix_from_xyzquat(translate=tag_map_transform[0:3],
                                          quaternion=tag_map_transform[3:7])

    kinect_map_tfm_mat = np.dot(tag_map_tfm_mat,
                                np.linalg.inv(tag_kinect_tfm_mat))

    kinect_map_pose = (tfm.translation_from_matrix(kinect_map_tfm_mat).tolist() +
                       tfm.quaternion_from_matrix(kinect_map_tfm_mat).tolist())

    link6tag = ['link6tag1', 'link6tag2', 'link6tag3', 'link6tag4']
    if calibtag in link6tag:
        print kinect_map_pose
        pubFrame(br,
                 pose=kinect_map_pose,
                 frame_id='new_kinect_pose',
                 parent_frame_id='map',
                 npub=10)
    else:
        realsense_link5_pose = poseTransform(kinect_map_pose, link_frame_id,
                                             'link_5', listener)
        pubFrame(br,
                 pose=realsense_link5_pose,
                 frame_id='new_realsense_pose',
                 parent_frame_id='link_5',
                 npub=10)

        print realsense_link5_pose