Code example #1
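Benchmark entry point (Python 2 / rospy): replays RealSense point-cloud bags laid out as item/pose_type/bag under Bin0, republishes each cloud, runs detectOneObject on it, and appends the results to result.json.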
# Standard/ROS imports needed by these examples; project-local helpers
# (capsen, detectOneObject, get_immediate_subdirectories, get_immediate_files,
# find_object_pose_type, pose2list, etc.) are assumed to be provided by the
# surrounding APC project modules.
import os
import json
import math
import copy

import rospy
import rosbag
import tf
import tf.transformations as tfm
from tf2_msgs.msg import TFMessage
from sensor_msgs.msg import PointCloud2


def main():
    
    rospy.init_node('capsen_benchmark', anonymous=True)
    
    basefolder = os.getenv('APCDATA_BASE') + '/bagfiles/vision_dataset/Bin0/'
    outputfile = os.getenv('APCDATA_BASE') + '/bagfiles/vision_dataset/Bin0/result.json'
    pc_topic = '/realsense/depth_registered/points'
    bin_num = 0
    mode = 1  # 0 for kinect, 1 for realsense
    item_names = get_immediate_subdirectories(basefolder)
    pubTf = rospy.Publisher('tf', TFMessage, queue_size=10)
    pubPointCloud = rospy.Publisher(pc_topic, PointCloud2, queue_size=1)
    capsen.init()
    
    listener = tf.TransformListener()
    rospy.sleep(0.1)
    
    result = []
    for item_name in item_names:
        #os.system("echo '%s' > %s/capsen_vision/models/models.txt" % (item_name, os.getenv('APCDATA_BASE')) ) # hack
        
        object_folder = os.path.join(basefolder, item_name)
        pose_types = get_immediate_subdirectories(object_folder)
        for pose_type in pose_types:
            posefolder = os.path.join(object_folder, pose_type)
            bagFilenames = get_immediate_files(posefolder)
            
            for bagFilename in bagFilenames:
                bagpath = os.path.join(posefolder, bagFilename)
                bag = rosbag.Bag(bagpath)
                print bagpath
                
                cnt = 0
                for topic, msg, t in bag.read_messages(topics=[pc_topic]):
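                    # republish the recorded cloud a few times with fresh
                    # stamps so the detector treats it as live data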
                    for i in range(3):
                        msg.header.stamp = rospy.Time.now()
                        pubPointCloud.publish(msg)
                        rospy.sleep(0.1)
                    rospy.sleep(0.5)

                    pose = detectOneObject(item_name, [item_name], bin_num, mode)
                    
                    if pose is not None:
                        print item_name, find_object_pose_type(item_name, pose), pose_type, bagFilename, cnt
                        result.append([item_name, pose, find_object_pose_type(item_name, pose), pose_type, bagFilename, cnt])
                    else:
                        print item_name, 'None', pose_type, bagFilename, cnt
                        result.append([item_name, None, None, pose_type, bagFilename, cnt])
                    cnt += 1
                    #raw_input('press enter to continue')
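                # checkpoint partial results after every bag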
                with open(outputfile, 'w') as outfile:
                    json.dump(result, outfile)
        
    os.system("cp %s/capsen_vision/models/models_full.txt %s/capsen_vision/models/models.txt" % (os.getenv('APCDATA_BASE'), os.getenv('APCDATA_BASE'))) # hack
Code example #2
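Queries a pose service populated by a manual model fit and classifies the returned pose. As the inline comment notes, the service name is hard-coded; GetPose and pose2list are assumed to come from the project's message and utility modules.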
def perceptFromManualFit(obj_id):
    posesrv = rospy.ServiceProxy('/pose_service', GetPose)   # should move service name out
    rospy.sleep(0.5)
    data = posesrv('', '')
    
    pose = pose2list(data.pa.object_list[1].pose)
    return (pose, find_object_pose_type(obj_id, pose))
Code example #3
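Perception heuristic: try a Kinect detection first and return it directly, unless the object or bin is flagged for hand-camera vision (or the Kinect failed); otherwise run percept_hand as well and return whichever detection scored higher. useKinect, objectUseHandVisionList, binUseHandVisionList, kinect_stop, and kinect_start are module-level configuration and helpers.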
def percept(obj_pose=None,
            binNum=0,
            obj_id=None,
            bin_contents=None,
            isExecute=True,
            withPause=True):
    capsen.capsen.init()
    print '[Percept] percept heuristic'

    goToHome.goToHome(robotConfig=None,
                      homePos=[1, 0, 1.2],
                      isExecute=isExecute,
                      withPause=withPause)
    kinect_obj_pose = None

    if useKinect:
        print '[Percept] kinect percept'
        kinect_obj_pose, kinect_score = capsen.capsen.detectOneObject(
            obj_id, bin_contents, binNum, mode=0, withScore=True)
        if kinect_obj_pose is None:
            print '[Percept] kinect percept failed.'
        else:
            kinect_pose_type = find_object_pose_type(obj_id, kinect_obj_pose)
            print '[Percept] kinect percept success.'

        if not (objectUseHandVisionList[obj_id]
                or binNum in binUseHandVisionList or kinect_obj_pose is None):
            return kinect_obj_pose, kinect_pose_type

    # turn off kinect
    kinect_stop()

    print '[Percept] Perception heuristic wants to do percept_hand.'
    hand_obj_pose, hand_pose_type, hand_score = percept_hand(
        obj_pose, binNum, obj_id, bin_contents, isExecute, withPause)

    # turn on kinect
    kinect_start()

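    # prefer whichever sensor's detection scored higher when both succeeded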
    if hand_obj_pose and kinect_obj_pose:
        if hand_score >= kinect_score:
            return hand_obj_pose, hand_pose_type
        else:
            return kinect_obj_pose, kinect_pose_type
    elif hand_obj_pose:
        return hand_obj_pose, hand_pose_type
    elif kinect_obj_pose:
        return kinect_obj_pose, kinect_pose_type
    else:
        return None, None
Code example #4
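Kinect variant of the benchmark in code example #1: clouds from two Kinect topics are republished, and detection runs only once a message from each topic has been seen for the current bag.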
def main():

    rospy.init_node("capsen_benchmark", anonymous=True)

    basefolder = os.getenv("APCDATA_BASE") + "/bagfiles/vision_dataset/Bin0_Kinect/"
    outputfile = os.getenv("APCDATA_BASE") + "/bagfiles/vision_dataset/Bin0_Kinect/result.json"
    pc_topics = ["/kinect2_1/depth_highres/points", "/kinect2_2/depth_highres/points"]
    bin_num = 0
    mode = 0  # 0 for kinect, 1 for realsense
    item_names = get_immediate_subdirectories(basefolder)
    pubTf = rospy.Publisher("tf", TFMessage, queue_size=10)
    pubPointCloud = {}
    pubPointCloud[pc_topics[0]] = rospy.Publisher(pc_topics[0], PointCloud2, queue_size=1)
    pubPointCloud[pc_topics[1]] = rospy.Publisher(pc_topics[1], PointCloud2, queue_size=1)
    capsen.init()

    listener = tf.TransformListener()
    rospy.sleep(0.1)

    result = []
    for item_name in item_names:
        os.system("echo '%s' > /home/mcube/apcdata/capsen_vision/models/models.txt" % item_name)  # hack

        object_folder = os.path.join(basefolder, item_name)
        pose_types = get_immediate_subdirectories(object_folder)
        for pose_type in pose_types:
            posefolder = os.path.join(object_folder, pose_type)
            bagFilenames = get_immediate_files(posefolder)

            for bagFilename in bagFilenames:
                bagpath = os.path.join(posefolder, bagFilename)
                bag = rosbag.Bag(bagpath)
                print bagpath

                cnt = 0
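                # run detection only once a cloud from both kinect topics has
                # been republished; flags is not reset afterwards, so every
                # later message in this bag also triggers a detection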
                flags = {}
                for topic, msg, t in bag.read_messages(topics=pc_topics):
                    for i in range(3):
                        msg.header.stamp = rospy.Time.now()
                        pubPointCloud[topic].publish(msg)
                        rospy.sleep(0.1)
                    rospy.sleep(0.5)

                    flags[topic] = True
                    if pc_topics[0] in flags and pc_topics[1] in flags:
                        pose = detectOneObject(item_name, [item_name], bin_num, mode)

                        if pose is not None:
                            print item_name, find_object_pose_type(item_name, pose), pose_type, bagFilename, cnt
                            result.append(
                                [item_name, pose, find_object_pose_type(item_name, pose), pose_type, bagFilename, cnt]
                            )
                        else:
                            print item_name, "None", pose_type, bagFilename, cnt
                            result.append([item_name, None, None, pose_type, bagFilename, cnt])
                        cnt += 1
                    # raw_input('press enter to continue')
                with open(outputfile, "w") as outfile:
                    json.dump(result, outfile)

    os.system(
        "cp /home/mcube/apcdata/capsen_vision/models/models_full.txt /home/mcube/apcdata/capsen_vision/models/models.txt"
    )  # hack
Code example #5
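Hand-camera perception: drives the arm to a percept home, then to left and right viewpoints in front of the target bin, runs the hand-mounted camera detection at each viewpoint, and returns the highest-scoring pose with its pose type. IKJoint, IK, pause, goToHome, get_bin_inner_cnstr, and xyzrpy_from_xyzquat are assumed project-local.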
def percept_hand(obj_pose=None, binNum=0, obj_id=None, bin_contents=None, isExecute=True, withPause=True):
    capsen.capsen.init()
    home_joint_pose = [0, -0.2, 0.2, 0.01, 1, 1.4]
    planner = IKJoint(target_joint_pos=home_joint_pose)
    #plan = Plan()
    #plan.q_traj = [home_joint_pose]
    plan = planner.plan()
    plan.visualize()
    plan.setSpeedByName('yolo')
    if withPause:
        print '[Percept] Going back to percept_hand home'
        pause()
    plan.execute()
    
    bin_cnstr = get_bin_inner_cnstr()
    
    # 1. init target poses
    target_poses = []
    refind = 7
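    # bin 7 is the reference pose; the other bins are offset by the difference
    # of their inner-constraint centers in y and z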
    for i in range(12):
        target_pose_7_vert = [1.1756, 0, 0.79966] + tfm.quaternion_from_euler(math.pi, 1.4, math.pi).tolist() # for bin 7 vertical
        target_pose_7_hori = [1.1756, 0, 0.79966] + tfm.quaternion_from_euler(2.2, 0, math.pi/2).tolist()
        target_pose_7_invhori = [1.1756, 0, 0.79966] + tfm.quaternion_from_euler(-math.pi/2, 0, -math.pi/2).tolist()
        
        if i < 3:
            target_pose = copy.deepcopy(target_pose_7_invhori)
            if i == 0:
                target_pose[1] -= 0.06
            target_pose[2] -= 0.08  # z
        elif i<6:
            target_pose = copy.deepcopy(target_pose_7_hori)
            target_pose[0] += 0.03  # x
            target_pose[2] -= 0.04  # z
        elif i<9:
            target_pose = copy.deepcopy(target_pose_7_hori)
        else:
            target_pose = copy.deepcopy(target_pose_7_hori)
            target_pose[2] += 0.1  # z

        target_pose[1] += (bin_cnstr[i][0]+bin_cnstr[i][1])/2 - (bin_cnstr[refind][0]+bin_cnstr[refind][1])/2  # y
        target_pose[2] += (bin_cnstr[i][4]+bin_cnstr[i][5])/2 - (bin_cnstr[refind][4]+bin_cnstr[refind][5])/2  # z
        target_poses.append(target_pose)
    
    y_deltas = [0.08, -0.08]
    caption = ['left', 'right']
    max_score = -100
    max_obj_pose = None
    for i in range(len(y_deltas)):
        # 2. plan to target
        tip_hand_transform = xyzrpy_from_xyzquat([-0.067, 0.027, -0.012, -0.007, 0.704, 0.710, -0.002])  # need to be updated
        
        target_pose = copy.deepcopy(target_poses[binNum])
        target_pose[1] += y_deltas[i]
        plan = None
        planner = IK(target_tip_pos=target_pose[0:3], target_tip_ori=target_pose[3:7],
                     tip_hand_transform=tip_hand_transform,
                     target_link='link_5', target_joint_bb=[[6, 1.2, 1.2]])
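        # progressively relax the position/orientation tolerances until a plan is found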
        for j in range(15):
            planner.ori_tol = 0.5*j/10.0
            planner.pos_tol = 0.01*j/10.0
            
            newplan = planner.plan()
            if newplan.success():
                plan = newplan
                print '[Percept] hand planning success at the trial %d' % j
                break
                
        if plan:
            plan.setSpeedByName('fastest')
            plan.visualize()

            if withPause:
                print '[Percept] Going to percept_hand %s ' % caption[i]
                pause()
            plan.execute()
        else:
            print '[Percept] hand planning failed'
            continue
        
        # 3. percept
        rospy.sleep(0.8)  # wait for new pointcloud
        obj_pose, score = capsen.capsen.detectOneObject(obj_id, bin_contents, binNum, mode=1, withScore=True)
        if obj_pose is not None and score > max_score:  # ignore failed detections
            max_obj_pose = obj_pose
            max_score = score

    # 4. move back to percept home
    planner = IKJoint(target_joint_pos=home_joint_pose)
    plan = planner.plan()
    plan.visualize()
    plan.setSpeedByName('yolo')
    if withPause:
        print '[Percept] Going back to percept_hand home'
        pause()
    plan.execute()
    
    goToHome.goToHome(robotConfig=None, homePos=[1, 0, 1.2], isExecute=isExecute, withPause=withPause)
    
    if max_obj_pose is None:
        return None, None, None
    
    pose_type = find_object_pose_type(obj_id, max_obj_pose)
    
    return max_obj_pose, pose_type, max_score