예제 #1
0
    def __init__(self, graspPlanner, cam, options, gripper):
        """Set up the camera model, TF broadcaster/listener, and grasp helpers.

        Parameters
        ----------
        graspPlanner : grasp-planning object, stored as ``self.gp``
        cam : camera interface exposing ``read_info_data()``
        options : configuration options object
        gripper : gripper interface
        """
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'

        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.options = options
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        self.gp = graspPlanner
        self.gripper = gripper
        self.com = COM()
        self.count = 0
예제 #2
0
 def __init__(self):
     """Initialize the robot interface, vision model, and camera geometry.

     Connects to the Fetch robot, loads the trained Keras model, sets up
     MoveIt and TF helpers, and blocks until camera intrinsics arrive.
     """
     self.bfp = True
     self.robot = robot_interface.Robot_Interface()
     self.url = 'http://172.31.76.30:80/ThinkingQ/'
     self.joint_names = ["shoulder_pan_joint",
                    "shoulder_lift_joint", "upperarm_roll_joint",
                    "elbow_flex_joint", "forearm_roll_joint",
                    "wrist_flex_joint", "wrist_roll_joint"]
     self.tf_listener = TransformListener()
     trfm = Transformer()
     # Camera-to-base transform, computed once at startup.
     self.r2b = trfm.transform_matrix_of_frames(
         'head_camera_rgb_optical_frame', 'base_link')
     self.model = load_model("./model/0.988.h5")
     self.br = TransformBroadcaster()
     self.move_group = MoveGroupInterface("arm", "base_link")
     self.cam = camera.RGBD()
     self.position_cloud = None
     # Block until intrinsics are published; read_info_data() may raise
     # or return None while the camera driver is still starting.
     while True:
         try:
             cam_info = self.cam.read_info_data()
             if cam_info is not None:
                 break
         except Exception:
             # Narrowed from a bare except so Ctrl-C can still interrupt.
             rospy.logerr('camera info not recieved')
     self.pcm = PCM()
     self.pcm.fromCameraInfo(cam_info)
예제 #3
0
    def __init__(self, graspPlanner, cam, options):
        """Connect to the suction action server and set up camera geometry.

        Parameters
        ----------
        graspPlanner : grasp-planning object, stored as ``self.gp``
        cam : camera interface exposing ``read_info_data()``
        options : configuration options object

        Exits the process if the suction action server is unreachable.
        """
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'
        suction_action = '/hsrb/suction_control'

        self.suction_control_client = actionlib.SimpleActionClient(
            suction_action, SuctionControlAction)

        try:
            if not self.suction_control_client.wait_for_server(
                    rospy.Duration(_CONNECTION_TIMEOUT)):
                # Fixed: previously referenced undefined `_suction_action`,
                # which raised a NameError and masked the real failure.
                raise Exception(suction_action + ' does not exist')
        except Exception as e:
            rospy.logerr(e)
            sys.exit(1)

        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        # NOTE(review): interactive debug shell left in the constructor —
        # confirm whether this is still needed before deployment.
        IPython.embed()
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.gp = graspPlanner

        self.options = options
예제 #4
0
def broadcast_poses():
    """Project a fixed pixel into 3-D using depth and broadcast it on TF.

    Reads camera intrinsics, looks up depth at a hard-coded image point,
    back-projects the pixel to a 3-D ray scaled by depth, and publishes the
    result as TF frame 'tree' relative to the camera optical frame.
    """
    robot = robot_interface.Robot_Interface()
    cam = camera.RGBD()

    # Block until camera intrinsics are published; read_info_data() may
    # raise or return None while the camera driver is still starting.
    cam_info = None
    while cam_info is None:
        try:
            cam_info = cam.read_info_data()
        except Exception:
            # Narrowed from a bare except so Ctrl-C can still interrupt.
            rospy.logerr('info not recieved')

    pcm = PCM()
    pcm.fromCameraInfo(cam_info)
    # Center of a hand-measured bounding box (see corner coords below).
    point = (208 + (323 - 208) / 2, 247 + (424 - 247) / 2)
    print(point)
    #(208.076538,247.264099) (323.411957,242.667325) (204.806457,424.053619) (324.232857,434.011618)
    dep_data = robot.get_img_data()[1]
    print(dep_data)
    dep = robot.get_depth(point, dep_data)
    print(dep)
    br = tf.TransformBroadcaster()
    # Unit ray through the pixel; normalize so z == 1, then scale by depth
    # to recover the 3-D point in the camera frame.
    td_points = pcm.projectPixelTo3dRay(point)
    norm_pose = np.array(td_points)
    norm_pose = norm_pose / norm_pose[2]
    norm_pose = norm_pose * (dep)
    # Fixed orientation: camera-frame rotation composed with a 90-deg yaw.
    a = tf.transformations.quaternion_from_euler(ai=-2.355, aj=-3.14, ak=0.0)
    b = tf.transformations.quaternion_from_euler(ai=0.0, aj=0.0, ak=1.57)
    base_rot = tf.transformations.quaternion_multiply(a, b)
    br.sendTransform(norm_pose, base_rot, rospy.Time.now(), 'tree',
                     'head_camera_rgb_optical_frame')
예제 #5
0
    def __init__(self, graspPlanner, cam, options, gripper):
        """Set up camera geometry, TF helpers, and grasp/tension machinery.

        Parameters
        ----------
        graspPlanner : grasp-planning object, stored as ``self.gp``
        cam : camera interface exposing ``read_info_data()``
        options : configuration options object
        gripper : gripper interface
        """
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'
        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')
        # Bells and whisltes
        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        self.gp = graspPlanner
        self.gripper = gripper
        self.options = options
        self.com = COM()

        # See paper for details, used to release grasp if too much force.
        self.tension = Tensioner()

        # Side, need to change this.
        self.side = 'BOTTOM'
        self.offset_x = 0.0 # positive means going to other side of bed (both sides!)
        self.offset_z = 0.0 # negative means going UP to ceiling
        self.apply_offset = False
예제 #6
0
    def __init__(self, cam):
        """Set up camera geometry, TF helpers, and the Xbox controller.

        Parameters
        ----------
        cam : camera interface exposing ``read_info_data()``
        """
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'

        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.cam = cam
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()

        self.xbox = XboxController()
예제 #7
0
    def __init__(self, cam):
        """Set up camera geometry, TF helpers, and the gripper action client.

        Parameters
        ----------
        cam : camera interface exposing ``read_info_data()``
        """
        # Michael Laskey
        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')
        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()

        # Daniel: add a fake frame. :( TODO: need to fix and get rid of this ...
        rospy.sleep(2.0)
        self._create_grasp_pose_fake()
        rospy.sleep(1.0)

        self.count = 0
        self.pose_count = 0

        # Justin Huang
        self._client = actionlib.SimpleActionClient(
            ACTION_SERVER, control_msgs.msg.GripperCommandAction)
        self._client.wait_for_server(rospy.Duration(10))
예제 #8
0
 def __init__(self):
     """Connect to the robot and camera and build the pinhole camera model."""
     self.bfp = True
     self.br = tf.TransformBroadcaster()
     self.robot = robot_interface.Robot_Interface()
     self.cam = camera.RGBD()
     # Block until intrinsics are published; read_info_data() may raise
     # or return None while the camera driver is still starting.
     while True:
         try:
             cam_info = self.cam.read_info_data()
             if cam_info is not None:
                 break
         except Exception:
             # Narrowed from a bare except so Ctrl-C can still interrupt.
             rospy.logerr('info not recieved')
     self.pcm = PCM()
     self.pcm.fromCameraInfo(cam_info)
예제 #9
0
    def __init__(self):
        """Open the RGBD camera and build its pinhole camera model."""
        self.cam = RGBD()

        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
예제 #10
0
    def __init__(self, label):
        """Initialize the HSR robot, camera model, and Faster-RCNN detector.

        Parameters
        ----------
        label : detection label this instance will look for
        """
        self.robot = hsrb_interface.Robot()
        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.label = label
        self.br = tf.TransformBroadcaster()
        self.sift = cv2.SIFT()

        # Stereo parameters used elsewhere for depth computation.
        self.focal = 5.0
        self.baseline = 5.0
        self.poses = []

        self.rgbd = RGBD()

        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.rgbd.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)

        self.count = 0
        # TensorFlow session for the detection network.
        self.sess = tfl.Session(config=tfl.ConfigProto(
            allow_soft_placement=True))
        # Load network architecture, then restore pretrained weights.
        self.net = get_network('VGGnet_test')
        self.saver = tfl.train.Saver(write_version=tfl.train.SaverDef.V1)
        self.saver.restore(
            self.sess,
            '../Faster-RCNN_TF/model/VGGnet_fast_rcnn_iter_70000.ckpt')
예제 #11
0
 def __init__(self):
     """Derive camera intrinsics from FOV, connect robot/MoveIt, get cam info.

     Focal lengths fx/fy are computed from the image center (cx, cy) and
     the horizontal/vertical field-of-view angles (ax, ay) in degrees.
     """
     self.bfp = True
     self.cx = 320
     self.cy = 240
     self.ax = 57.5
     self.ay = 45
     # f = c / tan(fov/2), with fov converted from degrees to radians.
     self.fx = self.cx / math.tan(self.ax * math.pi / 180.0 / 2)
     self.fy = self.cy / math.tan(self.ay * math.pi / 180.0 / 2)
     self.robot = robot_interface.Robot_Interface()
     self.br = tf.TransformBroadcaster()
     self.move_group = MoveGroupInterface("arm", "base_link")
     self.pose_group = moveit_commander.MoveGroupCommander("arm")
     self.cam = camera.RGBD()
     # Block until intrinsics are published; read_info_data() may raise
     # or return None while the camera driver is still starting.
     while True:
         try:
             cam_info = self.cam.read_info_data()
             if cam_info is not None:
                 break
         except Exception:
             # Narrowed from a bare except so Ctrl-C can still interrupt.
             rospy.logerr('info not recieved')
     self.pcm = PCM()
     self.pcm.fromCameraInfo(cam_info)
예제 #12
0
    def __init__(self, cam_info):
        """Build the camera model and load the Faster-RCNN bottle detector.

        Parameters
        ----------
        cam_info : ROS CameraInfo message used to configure the pinhole model
        """
        # Detection target and bookkeeping state.
        self.label = 'bottle'
        self.poses = []
        self.count = 0

        # TensorFlow session hosting the detection network.
        self.sess = tfl.Session()

        # Pinhole camera model configured from the supplied intrinsics.
        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)

        self.br = tf.TransformBroadcaster()

        # Load the network architecture, then restore pretrained weights
        # into the session from the checkpoint on disk.
        self.net = get_network('VGGnet_test')
        self.saver = tfl.train.Saver(write_version=tfl.train.SaverDef.V1)
        self.saver.restore(
            self.sess,
            '/home/autolab/Workspaces/michael_working/Faster-RCNN_TF/model/VGGnet_fast_rcnn_iter_70000.ckpt'
        )
예제 #13
0
    def __init__(self, options, name=None):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        options : configuration options object
        name : str, optional
            Checkpoint name for the detector; defaults to
            '07_14_15_27_17save.ckpt-12000' when not given.

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debuging.

        '''
        if name is None:
            name = '07_14_15_27_17save.ckpt-12000'
        self.cam = RGBD()
        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.options = options
        self.detect = Detector(name)
        self.br = tf.TransformBroadcaster()
        self.gp = GraspPlanner()
예제 #14
0
    def __init__(self, base, cam, options):
        """Set up the suction action client and camera geometry.

        Parameters
        ----------
        base : robot base interface, stored as ``self.base``
        cam : camera interface exposing ``read_info_data()``
        options : configuration options object
        """
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'
        suction_action = '/hsrb/suction_control'

        self.suction_control_client = actionlib.SimpleActionClient(
            suction_action, SuctionControlAction)

        # Block until camera intrinsics are published; read_info_data() may
        # raise or return None while the camera driver is still starting.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = cam.read_info_data()
            except Exception:
                # Narrowed from a bare except so Ctrl-C can still interrupt.
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        # NOTE(review): `graspPlanner` is not a parameter of this __init__ —
        # this raises NameError unless it exists at module scope. Verify the
        # intended source (likely a missing constructor argument).
        self.gp = graspPlanner

        self.options = options
        self.base = base