Example #1
def detectObjects(target_obj_id, obj_ids, bin_num, mode=0):
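    """Run _detectObjects up to nretry times per camera and keep the
    highest-scoring detection set that contains target_obj_id; the target
    pose is published as frame 'obj_final' in 'map'.

    mode 0 loops over the two Kinects (cams 1 and 2); mode 1 uses the
    RealSense (cam 3).  Returns (maxObjects, maxScore); maxObjects stays
    None if the target was never seen.
    """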
    nretry = 3
    maxObjects = None
    maxScore = -100  # some very small number
    if mode == 0:
        for j in range(1, 3):  # loop over 2 cams
            for i in range(nretry):
                retObjects, retScore = _detectObjects(obj_ids, bin_num, j)
                if retObjects is not None and retScore > maxScore:
                    target_ind = findTargetInd(retObjects, target_obj_id)
                    if target_ind is not None:
                        maxObjects = retObjects
                        maxScore = retScore
                        maxPose = pose2list(retObjects[target_ind].pose)
                        pubFrame(br, maxPose, 'obj_final', 'map')

    elif mode == 1:  # realsense
        for i in range(nretry):
            retObjects, retScore = _detectObjects(obj_ids, bin_num, 3)
            if retObjects is not None and retScore > maxScore:
                target_ind = findTargetInd(retObjects, target_obj_id)
                if target_ind is not None:
                    maxObjects = retObjects
                    maxScore = retScore
                    maxPose = pose2list(retObjects[target_ind].pose)
                    pubFrame(br, maxPose, 'obj_final', 'map')

    else:
        print 'Mode incorrect!'
        return (None, None)

    return (maxObjects, maxScore)
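A minimal usage sketch (hypothetical object IDs; assumes a running ROS graph
with the detection service and module globals such as br already set up):

    objects, score = detectObjects('expo_dry_erase_board_eraser',
                                   ['expo_dry_erase_board_eraser', 'crayola_64_ct'],
                                   bin_num=3, mode=1)  # mode 1: realsense only
    if objects is not None:
        print 'best detection score:', score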
Example #2
def detectOneObject(target_obj_id, obj_ids, bin_num, mode=0, withScore=False):
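    """Return the pose of target_obj_id in bin_num as a list, or None if it
    was not detected; with withScore=True, return (pose, score) instead.
    When the global toHack is set, a random pose is returned, optionally
    after a simulated detection delay."""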
    # hack
    if toHack:
        # may want to have delay
        if haveDelay:
            nretry = 4
            timeforcapsen = 3.0
            time_ = nretry * timeforcapsen
            if mode == 0:
                time_ *= 2
            rospy.sleep(time_)
            print '[detectOneObject] simulated computation time %.2f sec' % time_
        return randomPoseScore(bin_num, withScore)

    retObjects, retScore = detectObjects(target_obj_id, obj_ids, bin_num, mode)
    # find the target object
    if retObjects is not None:
        for obj in retObjects:
            if obj.name == target_obj_id:
                if withScore:
                    return (pose2list(obj.pose), retScore)
                else:
                    return pose2list(obj.pose)

    if withScore:
        return (None, None)
    else:
        return None
Example #3
def frameCallback(msg):
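    """Timer callback: republish the current interactive-marker target pose
    as TF frame 'target_pose' while holding the mutex."""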
    global br
    global currentTargetPose, currentTargetPoseDirty
    global listener
    time = rospy.Time.now()

    mutex.acquire()
    pubFrame(br, pose2list(currentTargetPose), 'target_pose', 'map', npub=1)

    # mutex.acquire()
    # tmp1 = currentTargetPoseDirty
    # tmp2 = currentTargetPose
    target_pose = pose2list(currentTargetPose)

    # tip_hand_transform = xyzrpy_from_xyzquat([0,0,0,0,0,0,1])
    # planner = IK(target_tip_pos = target_pose[0:3], target_tip_ori = target_pose[3:7], tip_hand_transform=tip_hand_transform,
    # target_link='link_6', ori_tol=0.001, pos_tol=0.0001, useFastIK=True)
    # plan = planner.plan()
    #
    # if plan.success():
    # print 'success'
    # plan.visualize()
    # #plan.execute()  #
    # else:
    # print 'failed'

    # currentTargetPoseDirty = False
    # mutex.release()
    # if tmp1:
    mutex.release()
Example #4
def detectObjects(target_obj_id, obj_ids, bin_num, mode=0):
    nretry = 3
    maxObjects = None
    maxScore = -100  # some very small number
    if mode == 0:
        for j in range(1, 3):  # loop over 2 cams
            for i in range(nretry):
                retObjects, retScore = _detectObjects(obj_ids, bin_num, j)
                if retObjects is not None and retScore > maxScore:
                    target_ind = findTargetInd(retObjects, target_obj_id)
                    if target_ind is not None:
                        maxObjects = retObjects
                        maxScore = retScore
                        maxPose = pose2list(retObjects[target_ind].pose)
                        pubFrame(br, maxPose, 'obj_final', 'map')

    elif mode == 1:  # realsense
        for i in range(nretry):
            retObjects, retScore = _detectObjects(obj_ids, bin_num, 3)
            if retObjects is not None and retScore > maxScore:
                target_ind = findTargetInd(retObjects, target_obj_id)
                if target_ind is not None:
                    maxObjects = retObjects
                    maxScore = retScore
                    maxPose = pose2list(retObjects[target_ind].pose)
                    pubFrame(br, maxPose, 'obj_final', 'map')

    else:
        print 'Mode incorrect!'
        return (None, None)

    return (maxObjects, maxScore)
Example #5
def detectOneObject(target_obj_id, obj_ids, bin_num, mode=0, withScore=False):
    # hack
    if toHack:
        # may want to have delay
        if haveDelay:
            nretry = 4
            timeforcapsen = 3.0
            time_ = nretry * timeforcapsen
            if mode == 0:
                time_ *= 2
            rospy.sleep(time_)
            print '[detectOneObject] simulated computation time %.2f sec' % time_
        return randomPoseScore(bin_num, withScore)

    retObjects, retScore = detectObjects(target_obj_id, obj_ids, bin_num, mode)
    # find the target object
    if retObjects is not None:
        for obj in retObjects:
            if obj.name == target_obj_id:
                if withScore:
                    return (pose2list(obj.pose), retScore)
                else:
                    return pose2list(obj.pose)

    if withScore:
        return (None, None)
    else:
        return None
Example #6
def frameCallback(msg):
    global br
    global currentTargetPose, currentTargetPoseDirty
    time = rospy.Time.now()

    pubFrame(br, pose2list(currentTargetPose), 'target_pose', 'map', npub=1)

    mutex.acquire()
    tmp1 = currentTargetPoseDirty
    tmp2 = currentTargetPose
    currentTargetPoseDirty = False
    mutex.release()
    if tmp1:
        solveIk(pose2list(tmp2))
Example #7
def perceptFromManualFit(obj_id):
    posesrv = rospy.ServiceProxy('/pose_service', GetPose)  # should move service name out
    rospy.sleep(0.5)
    data = posesrv('', '')

    pose = pose2list(data.pa.object_list[1].pose)
    return (pose, find_object_pose_type(obj_id, pose))
Example #8
def perceptFromManualFit(obj_id):
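    """Ask the manually-fitted pose service for the current object pose and
    return (pose, pose_type) for obj_id."""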
    posesrv = rospy.ServiceProxy('/pose_service',
                                 GetPose)  # should move service name out
    rospy.sleep(0.5)
    data = posesrv('', '')

    pose = pose2list(data.pa.object_list[1].pose)
    return (pose, find_object_pose_type(obj_id, pose))
Example #9
def allObjectsInsideBin(scene, bin_num):
    global _tflistener
    for i in range(len(scene.objects)):
        poselist_shelf = poseTransform(pose2list(scene.objects[i].pose), 'map', 'shelf', _tflistener)
        if not inside_bin(poselist_shelf[0:3], bin_num):
            return False
        
    return True
Example #10
def allObjectsInsideBin(scene, bin_num):
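    """Return True iff every object pose in the scene, transformed into the
    shelf frame, lies inside bin bin_num."""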
    global _tflistener
    for i in range(len(scene.objects)):
        poselist_shelf = poseTransform(pose2list(scene.objects[i].pose), 'map',
                                       'shelf', _tflistener)
        if not inside_bin(poselist_shelf[0:3], bin_num):
            return False

    return True
Example #11
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
    global _tflistener
    global br
    newscene = copy.deepcopy(scene)
    for i in range(len(scene.objects)):
        poselist_capsen_world = poseTransform(pose2list(scene.objects[i].pose), scene_frame_id, 'map', _tflistener)
        cap_T_our = get_obj_capsentf(scene.objects[i].name) # x,y,z,qx,qy,qz,qw
        poselist_world = transformBack(cap_T_our, poselist_capsen_world)  
        newscene.objects[i].pose = poselist2pose(poselist_world)
        
        #pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')
    return newscene
Example #12
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
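    """Deep-copy the scene, transforming every object pose from the Capsen
    scene frame into 'map' and converting from Capsen's model frame to our
    convention via get_obj_capsentf/transformBack."""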
    global _tflistener
    global br
    newscene = copy.deepcopy(scene)
    for i in range(len(scene.objects)):
        poselist_capsen_world = poseTransform(pose2list(scene.objects[i].pose),
                                              scene_frame_id, 'map',
                                              _tflistener)
        cap_T_our = get_obj_capsentf(
            scene.objects[i].name)  # x,y,z,qx,qy,qz,qw
        poselist_world = transformBack(cap_T_our, poselist_capsen_world)
        newscene.objects[i].pose = poselist2pose(poselist_world)

        #pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')
    return newscene
Example #13
def processFeedback(feedback):
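    """Interactive-marker feedback: print the new marker pose, store it as
    the current target, and solve IK for it while holding the mutex."""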
    global currentTargetPose, currentTargetPoseDirty
    import rospy
    p = feedback.pose.position
    o = feedback.pose.orientation
    print feedback.marker_name + " is now at " + str(p.x) + ", " + str(
        p.y) + ", " + str(p.z) + ", " + str(o.x) + ", " + str(
            o.y) + ", " + str(o.z) + ", " + str(o.w)

    #if feedback.marker_name == 'target_pose':
    mutex.acquire()
    currentTargetPose = feedback.pose
    #currentTargetPose = rosposeTransform(feedback.pose, 'link_6', 'map', listener)
    #currentTargetPoseDirty = True
    solveIk(pose2list(currentTargetPose))
    mutex.release()
Example #14
def main(argv=None):
    if argv is None:
        argv = sys.argv
        
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(1)
    
    calibtag = 'link6tag1'
    if len(argv) == 2:
        calibtag = argv[1]
    
    # kinect calib joints
    # j1: 9.93
    # j2: 39.46
    # j3: -32.74
    # j4: 69.54
    # j5: -11.26
    # j6: -70.88
    #link6tag1
    #link6tag3
    #link6tag1
    #link6tag3
    # j1: -16.98
    # j2: 65.55
    # j3: -13.48
    # j4: -21.15
    # j5: -51.26
    # j6: 10.63

    #link6tag1
    #link6tag3
    
    #filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/camera_extrinsic_calib_data/camera_extrinsic_calib_data.json'
    if calibtag == 'link2tag':
        tag_link_transform = [0, 0.7, -0.2145-0.005, -math.pi, 0, 0]  
        link_frame_id = 'link_2'
    elif calibtag == 'linkbasetag':
        print 'move to q =', (np.array([2.35,-67.73,62.39,-154.9,-30.11,11]) / 180 * math.pi).tolist()
        print 'move to q =', (np.array([2.35,-77.73,72.39,-154.9,-20.11,11]) / 180 * math.pi).tolist()
        tag_link_transform = [0.5291, 0.4428, 0.005, math.pi, 0, 0]  
        link_frame_id = 'base_link'
    elif calibtag == 'link6tag1':
        #print 'move to q =', (np.array([9.93,39.37,-30.63,60.64,-12.12,-61.79]) / 180 * math.pi).tolist()
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag2':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, 0]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag3':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, -math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag4':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi]  
        link_frame_id = 'link_6'
    else:
        print 'Unknown calibtag:', calibtag
        return
        
    # visualize it
    pubFrame(br, pose=tag_link_transform, frame_id='tag', parent_frame_id=link_frame_id, npub=10)
    
    
    tag_map_transform = poseTransform(tag_link_transform, link_frame_id, 'map', listener)
    
    apriltag_topic = 'tag_detections' 
    while True:
        tagdetect = ROS_Wait_For_Msg(apriltag_topic, AprilTagDetectionArray).getmsg() 
        if len(tagdetect.detections) == 1:
            tag_kinect_transform = pose2list(tagdetect.detections[0].pose.pose)
            break
    
    tag_kinect_tfm_mat = matrix_from_xyzquat(translate=tag_kinect_transform[0:3], 
                                                     quaternion=tag_kinect_transform[3:7]) 
    tag_map_tfm_mat = matrix_from_xyzquat(translate=tag_map_transform[0:3], 
                                                     quaternion=tag_map_transform[3:7]) 
    
    kinect_map_tfm_mat = np.dot(tag_map_tfm_mat, np.linalg.inv(tag_kinect_tfm_mat))
    
    kinect_map_pose = tfm.translation_from_matrix(kinect_map_tfm_mat).tolist() + tfm.quaternion_from_matrix(kinect_map_tfm_mat).tolist()
    

    link6tag = ['link6tag1', 'link6tag2', 'link6tag3', 'link6tag4']
    if calibtag in link6tag:
        print kinect_map_pose
        pubFrame(br, pose=kinect_map_pose, frame_id='new_kinect_pose', parent_frame_id='map', npub=10)
    else:
        realsense_link5_pose = poseTransform(kinect_map_pose, link_frame_id, 'link_5', listener)
        pubFrame(br, pose=realsense_link5_pose, frame_id='new_realsense_pose', parent_frame_id='link_5', npub=10)
        
        print realsense_link5_pose
Example #15
def main(argv=None):
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(0.1)
    
    filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/shelf_calib_data/shelf_calib_all_Final_apc.json'
    robot_touch_pts = readFromJson(filename)
    
    binNums = [0,2,3,5,6,8,9,11]
    nbin = len(binNums)
    diff_sum = 0
    # shelf_pose/x: 1.92421
    # shelf_pose/y: -0.0124050312627
    # shelf_pose/z: -0.513553368384
    ###########################################
    print 'floor of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][4], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (mouthposition, binFloorHeight) = getBinMouthAndFloor(0, i)  # a bin at row i
        diff = binFloorHeight - touch_pt[2]  # examine difference in z
        print 'bin',i,'(expected - robot_touch)=', diff
        diff_sum += diff
    print 'avg(diff)=', diff_sum/nbin
    offset_z = diff_sum/nbin
    print 'Please move the shelf in z by', offset_z
    print '\twhich is', -0.513553368384 - offset_z
    
    ###########################################
    diff_sum_left = 0
    print 'left wall of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][1], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i)  # a bin at row i
        diff = leftWall - touch_pt[0]  # examine difference in x
        print 'left wall: bin',i,'(expected - robot_touch)=', diff
        diff_sum_left += diff
    print 'avg(diff) left wall=', diff_sum_left/nbin
    
    
    ###########################################
    diff_sum_right = 0
    print 'right wall of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][2], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i)  # a bin at row i
        diff = rightWall - touch_pt[0]  # examine difference in x
        print 'right wall: bin',i,'(expected - robot_touch)=', diff
        diff_sum_right += diff
    print 'avg(diff) right wall=', diff_sum_right/nbin
    
    offset_y = (diff_sum_left/nbin+diff_sum_right/nbin) / 2
    print 'Please move the shelf in y by', offset_y
    print '\twhich is', 0.0124050312627 - offset_y
    
    ###########################################
    diff_sum = 0
    print 'lip to floor of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][0], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (mouthposition, binFloorHeight) = getBinMouthAndFloor(0, i)  # a bin at row i
        diff = binFloorHeight - touch_pt[2]  # examine difference in z
        print 'row',i,'(bin floor - robot_touch)=', diff
        diff_sum += diff
    print 'avg(diff)=', diff_sum/nbin

    ##########################################  front
    diff_sum_front = 0
    print 'front of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][2], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i)  # a bin at row i
        diff = rightWall - touch_pt[1]  # examine difference in y
        print 'right front wall: bin',i,'(expected - robot_touch)=', diff
        diff_sum_front += diff
    print 'avg(diff) front=', diff_sum_front/nbin
    offset_x = diff_sum_front/nbin
    print 'Please move the shelf in x by', offset_x
    print '\twhich is', 1.92421 - offset_x  #- 0.87/2 
Example #16
# Fragment: the loop body below runs inside an outer loop over x
# (presumably 'for x in np.linspace(limits[0], limits[1], nseg[0]):'),
# with limits, nseg, ori and the list xs defined further up.
for y in np.linspace(limits[2], limits[3], nseg[1]):
    for z in np.linspace(limits[4], limits[5], nseg[2]):
        setCart([x, y, z], ori)
        print x, y, z
        # get the apriltag pose from pr_apriltag
        apriltag_topic = '/pr_apriltags/detections'
        #print 'wait for detection at', apriltag_topic

        rospy.sleep(2)
        tag_camera_transform = None
        for i in xrange(5):
            tagdetect = ROS_Wait_For_Msg(apriltag_topic, AprilTagDetections).getmsg()
            tagpose = findtagid(tagdetect.detections, 0)
            if tagpose is not None:
                tag_camera_transform = pose2list(tagpose)
                break
            rospy.sleep(0.02)

        if tag_camera_transform is None:
            print 'No tag detected'
            continue

        # get the link6 apriltag pose from the robot
        (apriltrans, aprilrot) = lookupTransform('/map', '/link6_apriltag', listener)
        appose_robot = list(apriltrans) + list(aprilrot)

        # cam_pose = appose_robot * inv(tagpose)
        cam_pose = xyzquat_from_matrix(np.dot(matrix_from_xyzquat(appose_robot),
                                              np.linalg.inv(matrix_from_xyzquat(tag_camera_transform))))

        xs = xs + [cam_pose[0]]
Example #17
def main(argv=None):
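    """Shelf calibration check: for each of four rows, compare recorded
    robot touch points (bin floor, left/right walls, lip) against the shelf
    model and print the average differences."""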
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(0.1)
    
    filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/shelf_calib_data/shelf_calib.json'
    robot_touch_floor_pts = readFromJson(filename)
    num_rows = 4
    diff_sum = 0
    print 'floor of the shelves'
    for i in range(num_rows):
        pose = coordinateFrameTransform(robot_touch_floor_pts[i], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (mouthposition, binFloorHeight) = getBinMouthAndFloor(0, i*3)  # a bin at row i
        diff = binFloorHeight - touch_pt[2]  # examine difference in z
        print 'row',i,'(expected - robot_touch)=', diff
        diff_sum += diff
    print 'avg(diff)=', diff_sum/num_rows
    
    filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/shelf_calib_data/shelf_calib_left_side.json'
    robot_touch_left_pts = readFromJson(filename)
    num_rows = 4
    diff_sum_left = 0
    print 'left wall of the shelves'
    for i in range(num_rows):
        pose = coordinateFrameTransform(robot_touch_left_pts[i], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i*3)  # a bin at row i
        diff = leftWall - touch_pt[0]  # examine difference in x
        print 'left wall: row',i,'(expected - robot_touch)=', diff
        diff_sum_left += diff
    print 'avg(diff) left wall=', diff_sum_left/num_rows
    
    filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/shelf_calib_data/shelf_calib_right_side.json'
    robot_touch_right_pts = readFromJson(filename)
    num_rows = 4
    diff_sum_right = 0
    print 'right wall of the shelves'
    for i in range(num_rows):
        pose = coordinateFrameTransform(robot_touch_right_pts[i], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i*3)  # a bin at row i
        diff = rightWall - touch_pt[0]  # examine difference in x
        print 'right wall: row',i,'(expected - robot_touch)=', diff
        diff_sum_right += diff
    print 'avg(diff) right wall=', diff_sum_right/num_rows
    
    filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/shelf_calib_data/shelf_calib_right.json'
    robot_touch_lip_pts = readFromJson(filename)
    num_rows = 4
    diff_sum = 0
    print 'lip to floor of the shelves'
    for i in range(num_rows):
        pose = coordinateFrameTransform(robot_touch_lip_pts[i], '/map', '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (mouthposition, binFloorHeight) = getBinMouthAndFloor(0, i*3)  # a bin at row i
        diff = binFloorHeight - touch_pt[2]  # examine difference in z
        print 'row',i,'(bin floor - robot_touch)=', diff
        diff_sum += diff
    print 'avg(diff)=', diff_sum/num_rows
Example #18
    # Fragment: body of an interactive-marker IK setup (the enclosing
    # function definition is not shown).
    global q0
    q0 = None
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    currentTargetPose = Pose()
    br = TransformBroadcaster()
    currentTargetPoseDirty = False

    # create an interactive marker server on the topic namespace ik_interactive
    server = InteractiveMarkerServer("ik_interactive")

    # create an interactive marker for TargetPose
    #pose = [1.1756, -0.0080496, 0.79966, 0.96262, -0.039399, -0.25934, 0.06743]
    pose = [0, 0, 0, 0, 0, 0, 1]
    currentTargetPose = rosposeTransform(poselist2pose(pose), 'link_6', 'map',
                                         listener)
    pose = pose2list(currentTargetPose)
    int_marker = createInteractiveMarker('target_pose', *pose, frame_id='map')
    # cubemarker = createCubeMarker(offset=tuple(pose[0:3]), rgba=(1,0,1,0.5),
    # orientation=tuple(pose[3:7]),
    # scale=(0.01,0.01,0.01))
    #int_marker.controls.append(cubemarker)
    int_marker.controls.extend(createMoveControls(fixed=False))
    server.insert(int_marker, processFeedback)

    rospy.Timer(rospy.Duration(0.1), frameCallback)

    server.applyChanges()
    print 'ready'
    rospy.spin()
Example #19
def main(argv=None):
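    """Shelf calibration over the bins in binNums: compare recorded robot
    touch points against the shelf model and print the average z/y/x
    offsets to apply to shelf_pose."""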
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(0.1)

    filename = os.environ[
        'APC_BASE'] + '/catkin_ws/src/apc_config/shelf_calib_data/shelf_calib_all_Final_apc.json'
    robot_touch_pts = readFromJson(filename)

    binNums = [0, 2, 3, 5, 6, 8, 9, 11]
    nbin = len(binNums)
    diff_sum = 0
    # shelf_pose/x: 1.92421
    # shelf_pose/y: -0.0124050312627
    # shelf_pose/z: -0.513553368384
    ###########################################
    print 'floor of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][4], '/map',
                                        '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (mouthposition,
         binFloorHeight) = getBinMouthAndFloor(0, i)  # a bin at row i
        diff = binFloorHeight - touch_pt[2]  # examine difference in z
        print 'bin', i, '(expected - robot_touch)=', diff
        diff_sum += diff
    print 'avg(diff)=', diff_sum / nbin
    offset_z = diff_sum / nbin
    print 'Please move the shelf in z by', offset_z
    print '\twhich is', -0.513553368384 - offset_z

    ###########################################
    diff_sum_left = 0
    print 'left wall of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][1], '/map',
                                        '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i)  # a bin at row i
        diff = leftWall - touch_pt[0]  # examine difference in x
        print 'left wall: bin', i, '(expected - robot_touch)=', diff
        diff_sum_left += diff
    print 'avg(diff) left wall=', diff_sum_left / nbin

    ###########################################
    diff_sum_right = 0
    print 'right wall of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][2], '/map',
                                        '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i)  # a bin at row i
        diff = rightWall - touch_pt[0]  # examine difference in x
        print 'right wall: bin', i, '(expected - robot_touch)=', diff
        diff_sum_right += diff
    print 'avg(diff) right wall=', diff_sum_right / nbin

    offset_y = (diff_sum_left / nbin + diff_sum_right / nbin) / 2
    print 'Please move the shelf in y by', offset_y
    print '\twhich is', 0.0124050312627 - offset_y

    ###########################################
    diff_sum = 0
    print 'lip to floor of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][0], '/map',
                                        '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (mouthposition,
         binFloorHeight) = getBinMouthAndFloor(0, i)  # a bin at row i
        diff = binFloorHeight - touch_pt[2]  # examine difference in z
        print 'row', i, '(bin floor - robot_touch)=', diff
        diff_sum += diff
    print 'avg(diff)=', diff_sum / nbin

    ##########################################  front
    diff_sum_front = 0
    print 'front of the shelves'
    for i in binNums:
        pose = coordinateFrameTransform(robot_touch_pts[i][2], '/map',
                                        '/shelf', listener)
        touch_pt = pose2list(pose.pose)
        (leftWall, rightWall) = getBinSideWalls(i)  # a bin at row i
        diff = rightWall - touch_pt[1]  # examine difference in y
        print 'right front wall: bin', i, '(expected - robot_touch)=', diff
        diff_sum_front += diff
    print 'avg(diff) front=', diff_sum_front / nbin
    offset_x = diff_sum_front / nbin
    print 'Please move the shelf in x by', offset_x
    print '\twhich is', 1.92421 - offset_x  #- 0.87/2
Example #20
def main(argv=None):
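    """Camera extrinsic calibration: publish the known tag pose on the
    robot, average several AprilTag detections from the camera, and solve
    camera_map = tag_map * inv(tag_camera)."""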
    if argv is None:
        argv = sys.argv
    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(1)
    
    calibtag = 'link6tag1'
    if len(argv) == 2:
        calibtag = argv[1]
    
    #filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/camera_extrinsic_calib_data/camera_extrinsic_calib_data.json'
    if calibtag == 'link2tag':
        tag_link_transform = [0, 0.7, -0.2145-0.005, -math.pi, 0, 0]  
        link_frame_id = 'link_2'
    elif calibtag == 'realsense1':
        print 'move to q =', (np.array([ 0.0, -23.02, 34.73, 154.93, -25.15, 0.0]) / 180 * math.pi).tolist()
        
        r = 0.1923/2
        tag_link_transform = [0.4033-0.230 -r, -1.121/2 + 0.044 + r, 0.005, 0, 0, 0]  
        link_frame_id = 'base_link'
    elif calibtag == 'link6tag1':
        #print 'move to q =', (np.array([9.93,39.37,-30.63,60.64,-12.12,-61.79]) / 180 * math.pi).tolist()
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag2':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, 0]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag3':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, -math.pi/2]  
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag4':
        tag_link_transform = [-(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi]  
        link_frame_id = 'link_6'
    else:
        print 'Unknown calibtag:', calibtag
        return
        
    # visualize it
    pubFrame(br, pose=tag_link_transform, frame_id='tag', parent_frame_id=link_frame_id, npub=10)
    
    
    tag_map_transform = poseTransform(tag_link_transform, link_frame_id, 'map', listener)
    
    apriltag_topic = '/pr_apriltags/detections' 
    print 'Wait for apriltag detection'
    tag_camera_transforms = []
    for i in xrange(5):
        while True:
            tagdetect = ROS_Wait_For_Msg(apriltag_topic, AprilTagDetections).getmsg() 
            if len(tagdetect.detections) == 1:
                tag_camera_transform = pose2list(tagdetect.detections[0].pose)
                tag_camera_transforms.append(tag_camera_transform)
                break
    
    # NB: componentwise averaging of quaternions is approximate and leaves
    # the result unnormalized; acceptable here only because the detections
    # are nearly identical.
    tag_camera_transform = np.average(np.array(tag_camera_transforms), axis=0)
    
    tag_camera_tfm_mat = matrix_from_xyzquat(tag_camera_transform) 
    tag_map_tfm_mat = matrix_from_xyzquat(tag_map_transform) 
    
    camera_map_tfm_mat = np.dot(tag_map_tfm_mat, np.linalg.inv(tag_camera_tfm_mat))
    
    camera_map_pose = tfm.translation_from_matrix(camera_map_tfm_mat).tolist() + tfm.quaternion_from_matrix(camera_map_tfm_mat).tolist()
    

    link6tag = ['link6tag1', 'link6tag2', 'link6tag3', 'link6tag4']
    if calibtag in link6tag:
        print camera_map_pose
        pubFrame(br, pose=camera_map_pose, frame_id='new_kinect_pose', parent_frame_id='map', npub=10)
    elif calibtag == 'realsense1':
        realsense_link5_pose = poseTransform(camera_map_pose, link_frame_id, 'link_5', listener)
        pubFrame(br, pose=realsense_link5_pose, frame_id='new_realsense_pose', parent_frame_id='link_5', npub=10)
        
        print realsense_link5_pose
Example #21
def _detectOneObject(obj_id, bin_num, kinect_num):
    global _detect_one_object_srv
    global br
    
    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num, kinect_num)
    if pc is None:
        return (None, None)
    
    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections
    
    
    # 2. prepare constraints
    
    bin_cnstr = get_bin_cnstr()[bin_num]  # [right, left, back, front, bottom, top]
    ccnstr = []
    
    tol = 0.9  # larger is more strict
    (trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.2 left
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.3 back
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.4 front
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.5 floor
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.6 top
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]-floor_thick/2], trans,rot, pc.header.frame_id), tol, bin_num) )
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    # 
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]
        
        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
                # string name
                # geometry_msgs/Pose pose
                # float32 score
                # float32[] score_components
            if len(ret.detections) > 0:
                print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(pose2list(ret.detections[i].pose), pc.header.frame_id, 'map', _tflistener)
                    
                    cap_T_our = get_obj_capsentf(obj_id) # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(cap_T_our, poselist_capsen_world)    # transform to our desired pose
                    
                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map', 'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfies the hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encountered errors:', traceback.format_exc()
            return (None, None)
Example #22
def main(argv=None):
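    """Kinect extrinsic calibration: publish the known tag pose on the
    robot, take one AprilTag detection from the Kinect, and solve
    kinect_map = tag_map * inv(tag_kinect)."""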
    if argv is None:
        argv = sys.argv

    rospy.init_node('calib', anonymous=True)
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    br = tf.TransformBroadcaster()
    rospy.sleep(1)

    calibtag = 'link6tag1'
    if len(argv) == 2:
        calibtag = argv[1]

    # kinect calib joints
    # j1: 9.93
    # j2: 39.46
    # j3: -32.74
    # j4: 69.54
    # j5: -11.26
    # j6: -70.88
    #link6tag1
    #link6tag3
    #link6tag1
    #link6tag3
    # j1: -16.98
    # j2: 65.55
    # j3: -13.48
    # j4: -21.15
    # j5: -51.26
    # j6: 10.63

    #link6tag1
    #link6tag3

    #filename = os.environ['APC_BASE']+'/catkin_ws/src/apc_config/camera_extrinsic_calib_data/camera_extrinsic_calib_data.json'
    if calibtag == 'link2tag':
        tag_link_transform = [0, 0.7, -0.2145 - 0.005, -math.pi, 0, 0]
        link_frame_id = 'link_2'
    elif calibtag == 'linkbasetag':
        print 'move to q =', (
            np.array([2.35, -67.73, 62.39, -154.9, -30.11, 11]) / 180 *
            math.pi).tolist()
        print 'move to q =', (
            np.array([2.35, -77.73, 72.39, -154.9, -20.11, 11]) / 180 *
            math.pi).tolist()
        tag_link_transform = [0.5291, 0.4428, 0.005, math.pi, 0, 0]
        link_frame_id = 'base_link'
    elif calibtag == 'link6tag1':
        #print 'move to q =', (np.array([9.93,39.37,-30.63,60.64,-12.12,-61.79]) / 180 * math.pi).tolist()
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0,
            math.pi / 2
        ]
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag2':
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, 0
        ]
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag3':
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0,
            -math.pi / 2
        ]
        link_frame_id = 'link_6'
    elif calibtag == 'link6tag4':
        tag_link_transform = [
            -(2.54 + 4 + 1 + 19.23 / 2) / 100.0, 0, 0.01, math.pi, 0, math.pi
        ]
        link_frame_id = 'link_6'
    else:
        print 'Unknown calibtag:', calibtag
        return

    # visualize it
    pubFrame(br,
             pose=tag_link_transform,
             frame_id='tag',
             parent_frame_id=link_frame_id,
             npub=10)

    tag_map_transform = poseTransform(tag_link_transform, link_frame_id, 'map',
                                      listener)

    apriltag_topic = 'tag_detections'
    while True:
        tagdetect = ROS_Wait_For_Msg(apriltag_topic,
                                     AprilTagDetectionArray).getmsg()
        if len(tagdetect.detections) == 1:
            tag_kinect_transform = pose2list(tagdetect.detections[0].pose.pose)
            break

    tag_kinect_tfm_mat = matrix_from_xyzquat(
        translate=tag_kinect_transform[0:3],
        quaternion=tag_kinect_transform[3:7])
    tag_map_tfm_mat = matrix_from_xyzquat(translate=tag_map_transform[0:3],
                                          quaternion=tag_map_transform[3:7])

    kinect_map_tfm_mat = np.dot(tag_map_tfm_mat,
                                np.linalg.inv(tag_kinect_tfm_mat))

    kinect_map_pose = (tfm.translation_from_matrix(kinect_map_tfm_mat).tolist() +
                       tfm.quaternion_from_matrix(kinect_map_tfm_mat).tolist())

    link6tag = ['link6tag1', 'link6tag2', 'link6tag3', 'link6tag4']
    if calibtag in link6tag:
        print kinect_map_pose
        pubFrame(br,
                 pose=kinect_map_pose,
                 frame_id='new_kinect_pose',
                 parent_frame_id='map',
                 npub=10)
    else:
        realsense_link5_pose = poseTransform(kinect_map_pose, link_frame_id,
                                             'link_5', listener)
        pubFrame(br,
                 pose=realsense_link5_pose,
                 frame_id='new_realsense_pose',
                 parent_frame_id='link_5',
                 npub=10)

        print realsense_link5_pose
Example #23
def _detectOneObject(obj_id, bin_num, kinect_num):
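    """Detect obj_id in bin_num using camera kinect_num: filter the point
    cloud, build HALF_SPACE and SUPPORTING_PLANE constraints from the bin
    geometry, call the detect_one_object service, and return the first
    hypothesis that lands inside the bin as (poselist_world, score), or
    (None, None)."""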
    global _detect_one_object_srv
    global br

    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num,
                                                  kinect_num)
    if pc is None:
        return (None, None)

    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections

    # 2. prepare constraints

    bin_cnstr = get_bin_cnstr()[bin_num]  # [right, left, back, front, bottom, top]
    ccnstr = []

    tol = 0.9  # larger is more strict
    (trans, rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([1, 0, 0], [bin_cnstr[0], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.2 left
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([-1, 0, 0], [bin_cnstr[1], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.3 back
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 1, 0], [0, bin_cnstr[2], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.4 front
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, -1, 0], [0, bin_cnstr[3], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.5 floor
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.6 top
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, -1], [0, 0, bin_cnstr[5]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.SUPPORTING_PLANE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4] - floor_thick / 2],
                           trans, rot, pc.header.frame_id), tol, bin_num))
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    #
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]

        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
            # string name
            # geometry_msgs/Pose pose
            # float32 score
            # float32[] score_components
            if len(ret.detections) > 0:
                print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(
                        pose2list(ret.detections[i].pose), pc.header.frame_id,
                        'map', _tflistener)

                    cap_T_our = get_obj_capsentf(obj_id)  # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(
                        cap_T_our,
                        poselist_capsen_world)  # transform to our desired pose

                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map',
                                                   'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfies the hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encountered errors:', traceback.format_exc()
            return (None, None)