Example #1
0
class RGBD_Project(object):
    """Wraps an RGBD camera and its pinhole camera model.

    Blocks at construction until the camera publishes its intrinsics,
    then initializes a PCM (pinhole camera model) from them so pixel
    coordinates can be deprojected into 3D points.
    """

    def __init__(self):
        self.cam = RGBD()

        # Spin until the camera driver publishes CameraInfo; the read can
        # raise before the ROS topic is up, so retry on failure.  Compare
        # with 'is None' rather than '== None'.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)

    def deproject_pose(self, pose):
        """Convert a pixel + depth measurement into a 3D point.

        Parameters
        ----------
        pose : sequence of 3 floats
            (u_x, u_y, z) where (u_x, u_y) is the pixel location in the
            image and z is the depth reading at that pixel (millimeters,
            per the cfg.MM_TO_M scaling below).

        Returns
        -------
        numpy.ndarray
            3-vector: the ray through the pixel normalized to z == 1,
            scaled to the measured depth in meters.
        """
        td_points = self.pcm.projectPixelTo3dRay((pose[0], pose[1]))

        norm_pose = np.array(td_points)
        # Normalize so the ray has z == 1, then scale by depth (mm -> m).
        norm_pose = norm_pose / norm_pose[2]
        norm_pose = norm_pose * (cfg.MM_TO_M * pose[2])

        return norm_pose
Example #2
0
class Reward_Measure():
    """Captures synchronized color/depth frames from the robot camera.

    Blocks at construction until camera intrinsics arrive, then exposes
    helpers to stream image pairs and to point the robot's head down.
    """

    def __init__(self):
        self.cam = RGBD()
        self.com = COM()

        # Spin until the camera driver publishes CameraInfo; the read can
        # raise before the ROS topic is up, so retry on failure.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                rospy.logerr('info not recieved')

        #self.side = 'BOTTOM'
        self.cam_info = cam_info
        # NOTE(review): re-instantiates the RGBD/COM objects created
        # above; kept for behavioral parity -- confirm whether the
        # duplicates are intentional.
        self.cam = RGBD()
        self.com = COM()

    def capture_data(self):
        """Stream color/depth image pairs forever, collecting them.

        Displays each color frame for debugging and drops into an
        IPython shell after every captured pair.  Never returns.
        """
        data = []
        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            # 'is not None' (not '== None'): the images are numpy arrays,
            # and 'array == None' is an elementwise comparison whose truth
            # value is ambiguous.
            if c_img is not None and d_img is not None:
                datum = {}
                cv2.imshow('debug', c_img)
                cv2.waitKey(30)
                datum['c_img'] = c_img
                datum['d_img'] = d_img
                data.append(datum)
                IPython.embed()

    def position_head(self, whole_body):
        """Tilt the robot's head down to view the workspace."""
        whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
Example #3
0
class YoloDetect():
    """Detects objects with a YOLO-style network and publishes their
    3D poses as TF frames relative to the head RGBD sensor.
    """

    def __init__(self, options, name=None):
        """Set up the camera, detector, grasp planner and broadcaster.

        Parameters
        ----------
        options : object
            Provides OFFSET_X / OFFSET_Y used to map crop-relative pixel
            coordinates back into the full image (see convert_crop).
        name : str, optional
            Detector checkpoint name; defaults to a fixed snapshot.
        """
        if name is None:
            name = '07_14_15_27_17save.ckpt-12000'
        self.cam = RGBD()

        # Spin until the camera driver publishes CameraInfo; the read can
        # raise before the ROS topic is up, so retry on failure.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                rospy.logerr('info not recieved')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.options = options
        self.detect = Detector(name)
        self.br = tf.TransformBroadcaster()
        self.gp = GraspPlanner()

    def check_depth(self, p_list, d_img):
        """Debug-draw detection centers on top of the depth image.

        Parameters
        ----------
        p_list : list of [x, y] pixel coordinates.
        d_img : 2D depth image (numpy array).
        """
        rows, cols = d_img.shape

        color_img = np.zeros([rows, cols, 3])

        # Depth normalized into [0, 255] on channel 0.
        color_img[:, :, 0] = d_img * (255.0 / float(np.max(d_img)))

        for p in p_list:
            print(p)
            # 10x10 marker centered on the detection (p = [x, y]).
            color_img[p[1] - 5:p[1] + 5, p[0] - 5:p[0] + 5, 1] = 255.0

        cv2.imshow('debug', color_img)
        cv2.waitKey(30)

    def get_detect(self, c_img, d_img):
        """Run the detector on a color image and store, for every
        detection, [class label, full-image (x, y, depth)] in self.poses.
        """
        results = self.detect.numpy_detector(c_img)

        self.poses = []

        p_list = []
        for result in results:
            print(result)

            # Box center and half-extents.
            x = int(result['box'][0])
            y = int(result['box'][1])
            w = int(result['box'][2] / 2)
            h = int(result['box'][3] / 2)

            p_list.append([x, y])

            # Crop the depth image around the detection.  Fixed: the
            # original 'd_img[y-h:y+h, x-w, :x+w]' used a comma instead
            # of a colon, indexing the 2D depth image (see check_depth's
            # two-value shape unpack) with three subscripts.
            d_img_c = d_img[y - h:y + h, x - w:x + w]
            depth = self.gp.find_mean_depth(d_img_c)
            self.poses.append([result['class'],
                               self.convert_crop([x, y, depth])])

        self.check_depth(p_list, d_img)

    def convert_crop(self, pose):
        """Shift a crop-relative [x, y, depth] pose into full-image
        coordinates using the configured offsets.

        Mutates and returns the same list.
        """
        pose[0] = self.options.OFFSET_Y + pose[0]
        pose[1] = self.options.OFFSET_X + pose[1]

        return pose

    def broadcast_poses(self):
        """Broadcast every recorded pose as a TF frame named by its
        class label, parented to 'head_rgbd_sensor_rgb_frame'.
        """
        count = 0
        for label, num_pose in self.poses:
            # Ray through the detected pixel; depth converted mm -> m.
            td_points = self.pcm.projectPixelTo3dRay((num_pose[0],
                                                      num_pose[1]))
            z = 0.001 * num_pose[2]

            self.br.sendTransform((td_points[0], td_points[1], z),
                                  (0.0, 0.0, 0.0, 1.0),
                                  rospy.Time.now(),
                                  label,
                                  'head_rgbd_sensor_rgb_frame')
            count += 1
Example #4
0
class Collect_Demos():
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if (not user_name == None):
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()

    def proess_state(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        cur_action, noise_action, time_pub = self.joystick.apply_control()

        state = self.com.format_data(img_rgb, img_depth)

        #cv2.imwrite('frame_'+str(self.count)+'.png',state[0])
        #Save all data
        data = {}
        data['action'] = cur_action
        data['color_img'] = state[0]
        data['depth_img'] = state[1]
        data['noise_action'] = noise_action

        pose = self.whole_body.get_end_effector_pose().pos
        pose = np.array([pose.x, pose.y, pose.z])
        data['robot_pose'] = pose
        # data['action_time'] = time_pub
        # data['image_time'] = self.cam.color_time_stamped

        print "ACTION AT COUNT ", self.count
        print cur_action
        self.count += 1
        self.trajectory.append(data)
        # if(LA.norm(cur_action) > 1e-3):
        # 	print "GOT ACCEPTED"
        # 	self.trajectory.append(data)

    def check_success(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        success = self.b_d.detect_bottle(img_rgb, img_depth)

        print "BOTTLE FOUND ", success

        return success

    def run(self):

        self.joystick.apply_control()
        cur_recording = self.joystick.get_record_actions_passive()

        if (cur_recording[0] < -0.1):
            print "BEGIN DATA COLLECTION"
            self.start_recording = True
        count = 0

        if (self.start_recording):
            self.count = 0
            self.trajectory = []
            while not self.stop_recording:
                #while count < 20:

                if (self.cam.is_updated):
                    self.proess_state()
                    self.cam.is_updated = False

                cur_recording = self.joystick.get_record_actions()
                if (cur_recording[1] < -0.1):
                    print "END DATA COLLECTION"
                    self.stop_recording = True

                count += 1

            self.check_success()
            q = input('SAVE DATA: y or n: ')

            if (q == 'y'):
                self.com.save_recording(self.trajectory)

            self.start_recording = False
            self.stop_recording = False