Code example #1
0
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
    """Return a deep copy of `scene` with every object's pose converted from
    CapSen's pose convention to our desired convention, expressed in 'map'.

    Args:
        scene: scene message with an `objects` list; each object carries a
            `pose` and a `name`.
        scene_frame_id: tf frame id the incoming poses are expressed in.

    Returns:
        A new scene object (the input is not mutated) with transformed poses.
    """
    global _tflistener
    newscene = copy.deepcopy(scene)
    # Iterate source and copy in lockstep instead of indexing by position.
    for obj, newobj in zip(scene.objects, newscene.objects):
        # CapSen pose -> world ('map') frame.
        poselist_capsen_world = poseTransform(pose2list(obj.pose), scene_frame_id, 'map', _tflistener)
        # Per-object calibration offset between CapSen's model frame and ours
        # (x,y,z,qx,qy,qz,qw); undo it to get the pose in our convention.
        cap_T_our = get_obj_capsentf(obj.name)
        poselist_world = transformBack(cap_T_our, poselist_capsen_world)
        newobj.pose = poselist2pose(poselist_world)

        #pubFrame(br, poselist_world, 'obj_%s' % obj.name, 'map')
    return newscene
Code example #2
0
def transformObjectsFromCapsenToDesiredFrame(scene, scene_frame_id):
    """Deep-copy `scene` and rewrite each object's pose from CapSen's frame
    convention to our desired convention, expressed in the 'map' frame.

    Args:
        scene: scene message with an `objects` list (each has `pose`, `name`).
        scene_frame_id: tf frame id the incoming poses are expressed in.

    Returns:
        The transformed copy; the input scene is left untouched.
    """
    global _tflistener
    newscene = copy.deepcopy(scene)
    # Walk original and copy together rather than indexing with range(len()).
    for src_obj, dst_obj in zip(scene.objects, newscene.objects):
        # Express the CapSen pose in the world ('map') frame.
        poselist_capsen_world = poseTransform(pose2list(src_obj.pose),
                                              scene_frame_id, 'map',
                                              _tflistener)
        # Calibration offset CapSen model frame -> our model frame
        # (x,y,z,qx,qy,qz,qw); apply its inverse.
        cap_T_our = get_obj_capsentf(src_obj.name)
        dst_obj.pose = poselist2pose(
            transformBack(cap_T_our, poselist_capsen_world))

        #pubFrame(br, poselist_world, 'obj_%s' % scene.objects[i].name, 'map')
    return newscene
Code example #3
0
def _detectOneObject(obj_id, bin_num, kinect_num):
    global _detect_one_object_srv
    global br
    
    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num, kinect_num)
    if pc is None:
        return (None, None)
    
    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections
    
    
    # 2. prepare constraints
    
    bin_cnstr = get_bin_cnstr()[bin_num] # a list of right \ # left \ # back \  # front  \ # bottom \ # top
    ccnstr = []
    
    tol = 0.9  # larger is more strict
    (trans,rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([1,0,0], [bin_cnstr[0],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.2 left
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([-1,0,0], [bin_cnstr[1],0,0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.3 back
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,1,0], [0,bin_cnstr[2],0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.4 front
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,-1,0], [0,bin_cnstr[3],0], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.5 floor
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,1], [0,0,bin_cnstr[4]], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.6 top
    ccnstr.append( createCapsenConstraint(ObjectConstraint.HALF_SPACE, transformPlane([0,0,-1], [0,0,bin_cnstr[5]], trans,rot, pc.header.frame_id), tol, bin_num) )
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append( createCapsenConstraint(ObjectConstraint.SUPPORTING_PLANE, transformPlane([0,0,1], [0,0, bin_cnstr[4]-floor_thick/2], trans,rot, pc.header.frame_id), tol, bin_num) )
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    # 
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]
        
        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
                # string name
                # geometry_msgs/Pose pose
                # float32 score
                # float32[] score_components
            if len(ret.detections)>0:
                print len(ret.detections), 'ObjectHypothesis returned, max score', ret.detections[0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(pose2list(ret.detections[i].pose), pc.header.frame_id, 'map', _tflistener)
                    
                    cap_T_our = get_obj_capsentf(obj_id) # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(cap_T_our, poselist_capsen_world)    # transform to our desired pose
                    
                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map', 'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfy hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encounters errors:', traceback.format_exc()
            return (None, None)
Code example #4
0
def _detectOneObject(obj_id, bin_num, kinect_num):
    global _detect_one_object_srv
    global br

    print 'In', bcolors.WARNING, '_detectOneObject', bcolors.ENDC, 'obj_ids:', obj_id, 'bin_num:', bin_num
    # filter the point cloud
    pc, foreground_mask = get_filtered_pointcloud([obj_id], bin_num,
                                                  kinect_num)
    if pc is None:
        return (None, None)

    # string[] model_names
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # bool find_exact_object_list     # Set to true if you only want to consider scene hypotheses that contain exactly the objects in 'model_names'
    # ObjectConstraint[] constraints  # These apply to all objects
    # ---
    # SceneHypothesis[] detections

    # 2. prepare constraints

    bin_cnstr = get_bin_cnstr(
    )[bin_num]  # a list of right \ # left \ # back \  # front  \ # bottom \ # top
    ccnstr = []

    tol = 0.9  # larger is more strict
    (trans, rot) = lookupTransform(pc.header.frame_id, '/shelf', _tflistener)
    # 2.1 right
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([1, 0, 0], [bin_cnstr[0], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.2 left
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([-1, 0, 0], [bin_cnstr[1], 0, 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.3 back
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 1, 0], [0, bin_cnstr[2], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.4 front
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, -1, 0], [0, bin_cnstr[3], 0], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.5 floor
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.6 top
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.HALF_SPACE,
            transformPlane([0, 0, -1], [0, 0, bin_cnstr[5]], trans, rot,
                           pc.header.frame_id), tol, bin_num))
    # 2.7 on floor
    floor_thick = 0.03
    ccnstr.append(
        createCapsenConstraint(
            ObjectConstraint.SUPPORTING_PLANE,
            transformPlane([0, 0, 1], [0, 0, bin_cnstr[4] - floor_thick / 2],
                           trans, rot, pc.header.frame_id), tol, bin_num))
    # string model_name
    # sensor_msgs/PointCloud2 cloud   # Note: this must be an organized point cloud (e.g., 640x480)
    # bool[] foreground_mask
    # ObjectConstraint[] constraints
    # geometry_msgs/Pose true_pose    # for testing
    # ---
    #
    # ObjectHypothesis[] detections

    with Timer('detect_one_object'):
        # detect using capsen
        service_name = '/detection_service/detect_one_object'
        req = DetectOneObjectRequest()
        req.model_name = obj_id
        req.cloud = pc
        req.constraints = ccnstr
        req.foreground_mask = foreground_mask
        #req.foreground_mask = [True for i in xrange(req.cloud.height*req.cloud.width)]

        print 'Waiting for service up: ', service_name
        rospy.wait_for_service(service_name)
        try:
            print 'Calling service:', service_name
            ret = _detect_one_object_srv(req)
            # ret.detections is a list of capsen_vision/ObjectHypothesis
            # string name
            # geometry_msgs/Pose pose
            # float32 score
            # float32[] score_components
            if len(ret.detections) > 0:
                print len(
                    ret.detections
                ), 'ObjectHypothesis returned, max score', ret.detections[
                    0].score
                for i in range(len(ret.detections)):
                    poselist_capsen_world = poseTransform(
                        pose2list(ret.detections[i].pose), pc.header.frame_id,
                        'map', _tflistener)

                    cap_T_our = get_obj_capsentf(obj_id)  # x,y,z,qx,qy,qz,qw
                    poselist_world = transformBack(
                        cap_T_our,
                        poselist_capsen_world)  # transform to our desired pose

                    # check whether inside bin
                    poselist_shelf = poseTransform(poselist_world, 'map',
                                                   'shelf', _tflistener)
                    if inside_bin(poselist_shelf[0:3], bin_num):
                        #pubFrame(br, poselist_world, 'obj', 'map')
                        return (poselist_world, ret.detections[i].score)
                    else:
                        print 'reject hypo', i, 'because it is outside the target bin'
                print 'No ObjectHypothesis satisfy hard bin constraint'
                return (None, None)
            else:
                print 'No ObjectHypothesis returned'
                return (None, None)
        except:
            print 'Calling service:', service_name, 'failed'
            print 'encounters errors:', traceback.format_exc()
            return (None, None)