Example #1
	def __init__(self,features,user_name = None):
		
		self.com = COM()
		self.robot = hsrb_interface.Robot()

		self.noise = 0.1
		self.features = features  # self.com.binary_image
		self.count = 0
	
		if user_name is not None:
			self.com.Options.setup(self.com.Options.root_dir,user_name)

		

		self.omni_base = self.robot.get('omni_base')
		self.whole_body = self.robot.get('whole_body')
		self.gripper = self.robot.get('gripper')

		self.com.go_to_initial_state(self.whole_body,self.gripper)

		self.joystick = JoyStick_X(self.com)

		self.policy = Policy(self.com,self.features)

		self.com.change_grip(self.gripper)
Example #2
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.count = 425

        self.joystick = JoyStick_X(self.com)
        self.true_count = 0
Example #3
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()
Example #4
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        #PARAMETERS TO CHANGE
        self.side = 'TOP'

        self.r_count = 0

        self.grasp_count = 0
        self.success_count = 0

        self.true_count = 0
        self.grasp = True

        self.r_count = self.get_rollout_number()

        self.cam = RGBD()
        self.com = COM()

        self.joystick = JoyStick_X(self.com)

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.position_head()

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        #self.test_current_point()
        time.sleep(4)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #5
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.count = 425

        self.joystick = JoyStick_X(self.com)
        self.true_count = 0

    def collect_data(self):

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            c_img, d_img = self.com.format_data(c_img, d_img)

            cv2.imshow('video_feed', c_img)
            cv2.waitKey(30)

            cur_recording = self.joystick.get_record_actions_passive()
            if (cur_recording[0] < -0.1 and self.true_count % 5 == 0):
                print "PHOTO SNAPPED " + str(self.count)
                cv2.imwrite(
                    cfg.IMAGE_PATH + '/frame_' + str(self.count) + '.png',
                    c_img)
                self.count += 1
            self.true_count += 1
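
The class above only defines the collector; a minimal usage sketch, assuming this module's imports are available and a ROS master plus the HSR drivers are running (nothing here works without the robot):

if __name__ == '__main__':
    # Hypothetical entry point: streams the camera feed and saves a frame
    # whenever the joystick's record axis is held.
    collector = CardPicker()
    collector.collect_data()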
Example #6
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        self.suction = Suction(self.gp, self.cam, self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #7
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.whole_body = self.robot.get('whole_body')

        #self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        #self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        #self.suction = Suction(self.gp,self.cam,self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the non-zero pixels of the depth image.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def card_pick(self):

        while True:

            cur_recording = self.joystick.get_record_actions_passive()
            self.broadcast_transform()

            if (cur_recording[0] < -0.1):

                self.go_to_centroid(self.whole_body)

                #self.com.go_to_initial_state(self.whole_body)

    def broadcast_transform(self):

        try:
            self.br.sendTransform(
                (0.0, 0.0, -0.02),
                tf.transformations.quaternion_from_euler(ai=-0.785,
                                                         aj=0.0,
                                                         ak=0.0),
                rospy.Time.now(), 'transform_ar_marker', 'ar_marker/11')
        except:
            rospy.logerr('ar marker not found')

    def go_to_centroid(self, whole_body):

        whole_body.end_effector_frame = 'hand_l_finger_vacuum_frame'
        nothing = True

        #self.whole_body.move_to_neutral()

        whole_body.move_end_effector_pose(geometry.pose(z=-0.02, ei=-0.785),
                                          'ar_marker/11')
        #whole_body.move_end_effector_by_line((0,0,1),0.02)
        #self.start()

        #whole_body.move_to_joint_positions({'arm_lift_joint':0.23})

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            print transform
            if 'card' in transform:
                print 'got here'
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

                return True, cards
        # except:
        return False, []
Example #8
class FindObject():

	def __init__(self,features,user_name = None):
		
		self.com = COM()
		self.robot = hsrb_interface.Robot()

		self.noise = 0.1
		self.features = features  # self.com.binary_image
		self.count = 0
	
		if user_name is not None:
			self.com.Options.setup(self.com.Options.root_dir,user_name)

		

		self.omni_base = self.robot.get('omni_base')
		self.whole_body = self.robot.get('whole_body')
		self.gripper = self.robot.get('gripper')

		self.com.go_to_initial_state(self.whole_body,self.gripper)

		self.joystick = JoyStick_X(self.com)

		self.policy = Policy(self.com,self.features)

		self.com.change_grip(self.gripper)

	def run(self):

		if(self.policy.cam.is_updated):
			self.com.load_net()
			self.policy.rollout()
			self.com.clean_up()

			self.policy.cam.is_updated = False
			self.count += 1
		

	def check_initial_state(self):

		go = False

		while not go:
			img_rgb = self.policy.cam.read_color_data()
			img_depth = self.policy.cam.read_depth_data()
			
			state = self.com.format_data(img_rgb,img_depth)
			cv2.imshow('initial_state', state[0] )
			cv2.waitKey(30)
			self.joystick.apply_control()
			cur_recording = self.joystick.get_record_actions()
			if(cur_recording[1] < -0.1):
				print "BEGIN ROLLOUT"
				go = True


	def go_to_initial_state(self):
		self.com.go_to_initial_state(self.whole_body,self.gripper)


	def check_success(self):

		self.com.clean_up()

		self.b_d = Bottle_Detect()

		img_rgb = self.policy.cam.read_color_data()

		success = self.b_d.detect_bottle(img_rgb)

		self.b_d.clean_up()

		print "BOTTLE FOUND ",success

		return success

	def save_data(self,img_rgb,img_detect):

		state = self.com.format_data(img_rgb,None)

		state['found_object'] = img_detect
		#state['object_poses'] (object poses are not passed to this method)

	def check_marker(self):

		try: 
			A = self.tl.lookupTransform('head_l_stereo_camera_frame','ar_marker/12', rospy.Time(0))
		except: 
			rospy.logerr('trash not found')
			return False

		return True

	def execute_grasp(self):

		self.com.grip_open(self.gripper)
		
		self.whole_body.end_effector_frame = 'hand_palm_link'
		nothing = True
		
		try:
			self.whole_body.move_end_effector_pose(geometry.pose(y=0.15,z=-0.11, ek=-1.57),'ar_marker/9')
			nothing = False
		except:
			rospy.logerr('mustard bottle not found')


		self.com.grip_squeeze(self.gripper)

		self.whole_body.move_end_effector_pose(geometry.pose(z=-0.9),'hand_palm_link')

		self.com.grip_open(self.gripper)
		self.com.grip_squeeze(self.gripper)
Example #9
class FindObject():

	def __init__(self,features,user_name = None):
		
		self.com = COM()
		self.robot = hsrb_interface.Robot()

		self.noise = 0.1
		self.features = features  # self.com.binary_image
		self.count = 0
	
		if user_name is not None:
			self.com.Options.setup(self.com.Options.root_dir,user_name)

		

		self.omni_base = self.robot.get('omni_base')
		self.whole_body = self.robot.get('whole_body')
		self.gripper = self.robot.get('gripper')

		#self.com.go_to_initial_state(self.whole_body,self.gripper)

		self.joystick = JoyStick_X(self.com)
		self.tl = TransformListener() 

		self.policy = Policy(self.com,self.features)

	def run(self):

		if(self.policy.cam.is_updated):
			self.com.load_net()
			self.policy.rollout()
			self.com.clean_up()

			self.policy.cam.is_updated = False
			self.count += 1
		

	def check_initial_state(self):

		go = False
		sticky = True
		while  sticky: 
			self.joystick.apply_control()
			cur_recording = self.joystick.get_record_actions()
			if(cur_recording[1] > -0.1):
				print "BEGIN ROLLOUT"
				sticky  = False


		while not go:
			img_rgb = self.policy.cam.read_color_data()
			img_depth = self.policy.cam.read_depth_data()
			
			state = self.com.format_data(img_rgb,img_depth)
			cv2.imshow('initial_state', state[0] )
			cv2.waitKey(30)
			self.joystick.apply_control()
			cur_recording = self.joystick.get_record_actions()
			if(cur_recording[1] < -0.1):
				print "BEGIN ROLLOUT"
				go = True


	def go_to_initial_state(self):
		self.com.go_to_initial_state(self.whole_body,self.gripper)


	def clear_data(self):
		self.trajectory = []

	def mark_success(self,q):

		state = {}
		if(q == 'y'):
			state['success'] = 1
		else:
			state['success'] = 0

		self.trajectory.append(state)

	def is_goal_object(self):

		try:
			full_pose = self.tl.lookupTransform('head_l_stereo_camera_frame','ar_marker/9', rospy.Time(0))
			trans = full_pose[0]

			transforms = self.tl.getFrameStrings()
			poses = []

			for transform in transforms:
				if 'bottle' in transform:
					f_p = self.tl.lookupTransform('head_rgbd_sensor_link',transform, rospy.Time(0))
					poses.append(f_p[0])
					print 'augmented pose ',f_p
					print 'ar_marker ', trans


			for pose in poses:
				
				if(LA.norm(pose[2]-0.1-trans[2])< 0.03):
					return True
		except: 
			rospy.logerr('AR MARKER NOT THERE')

		return False


	def check_success(self):

		self.com.clean_up()

		self.b_d = Bottle_Detect(self.policy.cam.read_info_data())

		img_rgb = self.policy.cam.read_color_data()
		img_depth = self.policy.cam.read_depth_data()

		s_obj,img_detect,poses = self.b_d.detect_bottle(img_rgb,img_depth)

		success = self.is_goal_object()

		self.b_d.clean_up()

		self.process_data(img_rgb,img_detect,poses,success)



		print "BOTTLE FOUND ",success

		return success

	def process_data(self,img_rgb,img_detect,object_poses,success):

		img_rgb_cr,img_d = self.com.format_data(img_rgb,None)

		state = {}
		state['color_img'] = img_rgb_cr
		state['found_object'] = img_detect
		state['object_poses'] = object_poses
		state['was_success'] = success

		self.trajectory.append(state)

	def save_data(self):

		self.com.save_evaluation(self.trajectory)


	def check_marker(self):

		try: 
			A = self.tl.lookupTransform('head_l_stereo_camera_frame','ar_marker/9', rospy.Time(0))
		except: 
			rospy.logerr('trash not found')
			return False

		return True


	def execute_grasp(self):

		self.com.grip_open(self.gripper)
		self.whole_body.end_effector_frame = 'hand_palm_link'
		nothing = True
		
		try:
			self.whole_body.move_end_effector_pose(geometry.pose(y=0.15,z=-0.09, ek=-1.57),'ar_marker/9')
			
		except:
			rospy.logerr('mustard bottle not found')

		self.com.grip_squeeze(self.gripper)

		self.whole_body.move_end_effector_pose(geometry.pose(z=-0.9),'hand_palm_link')

		self.com.grip_open(self.gripper)
		self.com.grip_squeeze(self.gripper)
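
The is_goal_object check above accepts a detection when the bottle's z coordinate sits within 3 cm of a fixed 10 cm offset from the AR marker's z coordinate. A standalone sketch of that test (hypothetical helper, not part of the original code; poses given as (x, y, z) tuples):

def matches_goal(bottle_pose, marker_pose, offset=0.1, tol=0.03):
    # Accept when the bottle lies roughly `offset` metres beyond the marker
    # along the z axis, within `tol` metres.
    return abs(bottle_pose[2] - offset - marker_pose[2]) < tol

print(matches_goal((0.0, 0.0, 0.85), (0.0, 0.0, 0.74)))   # True  (|0.85 - 0.1 - 0.74| = 0.01)
print(matches_goal((0.0, 0.0, 1.10), (0.0, 0.0, 0.74)))   # False (|1.10 - 0.1 - 0.74| = 0.26)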
Example #10
class BedMaker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        #PARAMETERS TO CHANGE
        self.side = 'TOP'

        self.r_count = 0

        self.grasp_count = 0
        self.success_count = 0

        self.true_count = 0
        self.grasp = True

        self.r_count = self.get_rollout_number()

        self.cam = RGBD()
        self.com = COM()

        self.joystick = JoyStick_X(self.com)

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.position_head()

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        #self.test_current_point()
        time.sleep(4)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def get_rollout_number(self):

        if self.side == "BOTTOM":
            rollouts = glob.glob(cfg.FAST_PATH + 'b_grasp/*.png')
        else:
            rollouts = glob.glob(cfg.FAST_PATH + 't_grasp/*.png')
        r_nums = []
        for r in rollouts:

            # Strip the (hard-coded) directory prefix so that `a` has the form
            # '<rollout>_<count>.png', then parse the rollout index from it.
            a = r[56:]

            i = a.find('_')

            r_num = int(a[:i])

            r_nums.append(r_num)

        if not r_nums:
            return 0
        return max(r_nums) + 1

    def position_head(self):

        if self.side == "TOP":
            self.tt.move_to_pose(self.omni_base, 'right_down')
            self.tt.move_to_pose(self.omni_base, 'right_up')

            self.tt.move_to_pose(self.omni_base, 'top_mid')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
        elif self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def collect_data_bed(self):

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            cv2.imshow('video_feed', c_img)
            cv2.waitKey(30)

            cur_recording = self.joystick.get_record_actions_passive()
            if (cur_recording[0] < -0.1 and self.true_count % 20 == 0):
                print "PHOTO SNAPPED "
                self.save_image(c_img)

                if self.grasp:
                    self.grasp_count += 1
                    self.grasp = False
                else:
                    self.success_count += 1
                    self.grasp = True

            if (cur_recording[1] < -0.1 and self.true_count % 20 == 0):
                print "ROLLOUT DONE "
                self.r_count += 1
                self.grasp_count = 0
                self.success_count = 0
                self.grasp = True

            self.true_count += 1

    def save_image(self, c_img):

        if self.side == "BOTTOM":
            if self.grasp:
                cv2.imwrite(
                    cfg.FAST_PATH + 'b_grasp/frame_' + str(self.r_count) +
                    '_' + str(self.grasp_count) + '.png', c_img)
            else:
                cv2.imwrite(
                    cfg.FAST_PATH + 'b_success/frame_' + str(self.r_count) +
                    '_' + str(self.success_count) + '.png', c_img)

        else:
            if self.grasp:
                cv2.imwrite(
                    cfg.FAST_PATH + 't_grasp/frame_' + str(self.r_count) +
                    '_' + str(self.grasp_count) + '.png', c_img)
            else:
                cv2.imwrite(
                    cfg.FAST_PATH + 't_success/frame_' + str(self.r_count) +
                    '_' + str(self.success_count) + '.png', c_img)
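
get_rollout_number above recovers the rollout index by slicing a hard-coded prefix length (r[56:]), which silently breaks if cfg.FAST_PATH changes. A more robust sketch of the same parsing, assuming only the frame_<rollout>_<count>.png naming convention used by save_image:

import os
import re

def rollout_number_from(paths):
    # Pull the rollout index out of names like 'frame_12_3.png',
    # independent of the directory they live in.
    r_nums = []
    for p in paths:
        m = re.match(r'frame_(\d+)_\d+\.png$', os.path.basename(p))
        if m:
            r_nums.append(int(m.group(1)))
    return max(r_nums) + 1 if r_nums else 0

print(rollout_number_from(['/data/t_grasp/frame_4_0.png', '/data/t_grasp/frame_7_2.png']))  # 8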
Example #11
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        self.suction = Suction(self.gp, self.cam, self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the non-zero pixels of the depth image.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def find_card(self):

        self.whole_body.move_to_neutral()
        self.whole_body.move_to_joint_positions({
            'arm_flex_joint': -1.57,
            'head_tilt_joint': -.785
        })
        print self.check_card_found()[0]
        i = 0
        while True:
            print "panning ", i
            self.whole_body.move_to_joint_positions({'head_pan_joint': .30})
            i += 1
            if self.check_card_found()[0]:
                break
        print "found card!"
        self.card_pick()

    def card_pick(self):

        while True:

            c_img = self.cam.read_color_data()
            c_img_c = np.copy(c_img)
            cv2.imshow('debug_true', c_img_c)
            cv2.waitKey(30)
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:
                c_img_cropped, d_img = self.com.format_data(
                    np.copy(c_img), d_img)

                data = self.detector.numpy_detector(np.copy(c_img_cropped))

                cur_recording = self.joystick.get_record_actions_passive()
                self.suction.find_pick_region_net(data, c_img, d_img, c_img_c)
                if (cur_recording[0] < -0.1):

                    card_found, cards = self.check_card_found()

                    if (card_found):
                        self.suction.execute_grasp(cards, self.whole_body)

                        self.com.go_to_initial_state(self.whole_body)

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            #print transform
            if 'card' in transform:
                print 'got here'
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

                return True, cards
        # except:
        return False, []
Example #12
from il_ros_hsr.core.grasp_planner import GraspPlanner

from il_ros_hsr.p_pi.bed_making.com import Bed_COM as COM
import sys
sys.path.append('/home/autolab/Workspaces/michael_working/fast_grasp_detect/')

from skvideo.io import vwrite
from il_ros_hsr.core.joystick_X import JoyStick_X

if __name__ == '__main__':

    robot = hsrb_interface.Robot()
    videos_color = []
    cam = RGBD()

    jy = JoyStick_X()

    while True:

        time.sleep(0.05)
        img = cam.read_color_data()
        if img is not None:
            cv2.imshow('debug2', img)
            cv2.waitKey(30)
            img = img[:, :, (2, 1, 0)]
            videos_color.append(img)

            cur_recording = jy.get_record_actions_passive()
            if (cur_recording[0] < -0.1):
                print "SAVING VIDEO"
Example #13
    def __init__(self):
        """
        For faster data collection where we manually simulate it.
        We move with our hands.  This will give us the large datasets we need.

        Supports both grasping and success net data collection. If doing the
        grasping, DON'T MAKE IT A SUCCESS CASE where the blanket is all the way
        over the corner. That way we can use the images for both grasping and
        as failure cases for the success net.
        
        For the success net data collection, collect data at roughly a 5:1 ratio
        of successes:failures, and make failures the borderline cases. Then we
        borrow data from the grasping network to make it 5:5 or 1:1 for the actual
        success net training process (use another script for forming the data).
        We use the keys on the joystick to indicate the success/failure class.
        """
        makedirs()
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # ----------------------------------------------------------------------
        # PARAMETERS TO CHANGE  (well, really the 'side' and 'grasp' only).
        # We choose a fixed side and collect data from there, no switching.
        # Automatically saves based on `r_count` and counting the saved files.
        # `self.grasp` remains FIXED in the code, so we're either only
        # collecting grasp or only collecting success images.
        # ----------------------------------------------------------------------
        self.side = 'BOTTOM'  # CHANGE AS NEEDED
        self.grasp = False  # CHANGE AS NEEDED
        self.grasp_count = 0
        self.success_count = 0
        self.true_count = 0
        self.r_count = self.get_rollout_number()
        self.joystick = JoyStick_X(self.com)
        print("NOTE: grasp={} (success={}), side: {}, rollout num: {}".format(
            self.grasp, not self.grasp, self.side, self.r_count))
        print("Press X for any SUCCESS (class 0), Y for FAILURES (class 1).")

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        time.sleep(4)

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        #rospy.spin()

        # THEN we position the head since that involves moving the _base_.
        self.position_head()
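
The docstring above describes collecting success-net data at roughly a 5:1 success:failure ratio and defers rebalancing ("use another script for forming the data") to a later pass that borrows grasp-network images as extra failures. A minimal sketch of such a pass, assuming the pickle layout used in the next example (lists of dicts with 'c_img' and 'd_img'; success entries also carry 'class', 0 = success and 1 = failure) and hypothetical directory arguments:

import glob
import pickle
import random

def rebalance_success_data(success_dir, grasp_dir, seed=0):
    # Load every success-net rollout saved as data_<rollout>.pkl.
    success_data = []
    for path in sorted(glob.glob(success_dir + '/data_*.pkl')):
        with open(path, 'rb') as f:
            success_data.extend(pickle.load(f))

    n_succ = sum(1 for d in success_data if d['class'] == 0)
    n_fail = sum(1 for d in success_data if d['class'] == 1)

    # Borrow grasp-network images as additional failure (class 1) examples
    # until the two classes are balanced.
    grasp_data = []
    for path in sorted(glob.glob(grasp_dir + '/data_*.pkl')):
        with open(path, 'rb') as f:
            grasp_data.extend(pickle.load(f))
    random.seed(seed)
    random.shuffle(grasp_data)

    for d in grasp_data[:max(0, n_succ - n_fail)]:
        success_data.append({'c_img': d['c_img'],
                             'd_img': d['d_img'],
                             'class': 1,
                             'type': 'success'})
    return success_data

The returned list can then be shuffled and split for the success-net training script.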
Example #14
class BedMaker():
    def __init__(self):
        """
        For faster data collection where we manually simulate it.
        We move with our hands.  This will give us the large datasets we need.

        Supports both grasping and success net data collection. If doing the
        grasping, DON'T MAKE IT A SUCCESS CASE where the blanket is all the way
        over the corner. That way we can use the images for both grasping and
        as failure cases for the success net.
        
        For the success net data collection, collect data at roughly a 5:1 ratio
        of successes:failures, and make failures the borderline cases. Then we
        borrow data from the grasping network to make it 5:5 or 1:1 for the actual
        success net training process (use another script for forming the data).
        We use the keys on the joystick to indicate the success/failure class.
        """
        makedirs()
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # ----------------------------------------------------------------------
        # PARAMETERS TO CHANGE  (well, really the 'side' and 'grasp' only).
        # We choose a fixed side and collect data from there, no switching.
        # Automatically saves based on `r_count` and counting the saved files.
        # `self.grasp` remains FIXED in the code, so we're either only
        # collecting grasp or only collecting success images.
        # ----------------------------------------------------------------------
        self.side = 'BOTTOM'  # CHANGE AS NEEDED
        self.grasp = False  # CHANGE AS NEEDED
        self.grasp_count = 0
        self.success_count = 0
        self.true_count = 0
        self.r_count = self.get_rollout_number()
        self.joystick = JoyStick_X(self.com)
        print("NOTE: grasp={} (success={}), side: {}, rollout num: {}".format(
            self.grasp, not self.grasp, self.side, self.r_count))
        print("Press X for any SUCCESS (class 0), Y for FAILURES (class 1).")

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        time.sleep(4)

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        #rospy.spin()

        # THEN we position the head since that involves moving the _base_.
        self.position_head()

    def get_rollout_number(self):
        """Had to modify this a bit from Michael's code. Test+see if it works.

        For now, let's save based on how many `data.pkl` files we have in the
        appropriate directory.
        """
        if self.side == "BOTTOM":
            nextdir = 'b_grasp'
            if not self.grasp:
                nextdir = 'b_success'
            rollouts = sorted([
                x for x in os.listdir(join(FAST_PATH, nextdir))
                if 'data' in x and 'pkl' in x
            ])
        else:
            nextdir = 't_grasp'
            if not self.grasp:
                nextdir = 't_success'
            rollouts = sorted([
                x for x in os.listdir(join(FAST_PATH, nextdir))
                if 'data' in x and 'pkl' in x
            ])
        return len(rollouts)

    def position_head(self):
        """Ah, I see, we can go straight to the top. Whew.

        It's new code reflecting the different poses and HSR joints:
        But, see important note below about commenting out four lines ...
        """
        self.whole_body.move_to_go()
        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        else:
            self.tt.move_to_pose(self.omni_base, 'right_down')
            self.tt.move_to_pose(self.omni_base, 'right_up')
            self.tt.move_to_pose(self.omni_base, 'top_mid_tmp')
        # NOTE: If robot is already at the top, I don't want to adjust position.
        # Thus, comment out the three lines and the preceding `else` statement.

        self.whole_body.move_to_joint_positions(
            {'arm_flex_joint': -np.pi / 16.0})
        self.whole_body.move_to_joint_positions(
            {'head_pan_joint': np.pi / 2.0})
        self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120})
        self.whole_body.move_to_joint_positions(
            {'head_tilt_joint': -np.pi / 4.0})  # -np.pi/36.0})

    def collect_data_grasp_only(self):
        """Collect data for the grasping network only, like H's method.

        Actually, some of these images should likely be part of the success
        network training, where the 'success=False' because I don't think I
        collected data here that was considered a 'success=True' ...
        """
        data = []
        assert self.grasp
        rc = str(self.r_count).zfill(3)

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            # Continually show the camera image on screen and wait for user.
            # Doing `k=cv2.waitKey(33)` and `print(k)` results in -1 except for
            # when we press the joystick in a certain configuration.
            cv2.imshow('video_feed', c_img)
            cv2.waitKey(30)

            # Here's what they mean. Y is top, and going counterclockwise:
            # Y: [ 1,  0]
            # X: [-1,  0] # this is what we want for image collection
            # A: [ 0,  1]
            # B: [ 0, -1] # use to terminate the rollout
            #
            # There is also a bit of a delay embedded, i.e., repeated clicking
            # of `X` won't save images until some time has passed. Good! It is
            # also necessary to press for a few milliseconds (and not just tap).
            cur_recording = self.joystick.get_record_actions_passive()

            if (cur_recording[0] < -0.1 and self.true_count % 20 == 0):
                print(
                    "PHOTO SNAPPED (cur_recording: {})".format(cur_recording))
                self.save_image(c_img, d_img)
                self.grasp_count += 1

                # Add to dictionary info we want, including target pose.
                # Also add 'type' key since data augmentation code uses it.
                pose = red_contour(c_img)
                info = {
                    'c_img': c_img,
                    'd_img': d_img,
                    'pose': pose,
                    'type': 'grasp'
                }
                data.append(info)
                print("  image {}, pose: {}".format(len(data), pose))

                # --------------------------------------------------------------
                # We better save each time since we might get a failure to
                # detect, thus losing some data. We overwrite existing saved
                # files, which is fine since it's the current rollout `r_count`.
                # Since we detect pose before this, if the pose isn't detected,
                # we don't save. Good.
                # --------------------------------------------------------------
                if self.side == 'BOTTOM':
                    save_path = join(FAST_PATH, 'b_grasp',
                                     'data_{}.pkl'.format(rc))
                else:
                    save_path = join(FAST_PATH, 't_grasp',
                                     'data_{}.pkl'.format(rc))
                with open(save_path, 'w') as f:
                    pickle.dump(data, f)

            # Kill the script and re-position HSR to get diversity in camera views.
            if (cur_recording[1] < -0.1 and self.true_count % 20 == 0):
                print("ROLLOUT DONE (cur_recording: {})".format(cur_recording))
                print("Length is {}. See our saved pickle files.".format(
                    len(data)))
                sys.exit()

            # Necessary, otherwise we'd save 3-4 times per click.
            self.true_count += 1

    def collect_data_success_only(self):
        """Collect data for the success network.
        
        Should be more emphasis on the success cases (not failures) because the
        grasping network data can supplement the failures. Focus on _borderline_
        failures in this method.

        Recall that 0 = successful grasp, 1 = failed grasp.

        SAVE AND EXIT FREQUENTLY, perhaps after every 15-20 images. It's easy to
        make a mistake with the class label, so better to exit early often.
        """
        data = []
        assert not self.grasp
        rc = str(self.r_count).zfill(3)

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imshow('video_feed', c_img)
            cv2.waitKey(30)
            cur_recording = self.joystick.get_record_actions_passive()

            # Joystick controllers. Y is top, and going counterclockwise:
            # Y: [ 1,  0] # FAILURE images (class index 1)
            # X: [-1,  0] # SUCCESS images (class index 0)
            # A: [ 0,  1]
            # B: [ 0, -1] # terminate data collection
            # ------------------------------------------------------------------
            if (cur_recording[0] < -0.1
                    or cur_recording[0] > 0.1) and self.true_count % 20 == 0:
                print(
                    "PHOTO SNAPPED (cur_recording: {})".format(cur_recording))
                if cur_recording[0] < -0.1:
                    s_class = 0
                elif cur_recording[0] > 0.1:
                    s_class = 1
                else:
                    raise ValueError(cur_recording)
                self.save_image(c_img, d_img, success_class=s_class)
                self.success_count += 1

                # Add to dictionary info we want, including the class.
                info = {
                    'c_img': c_img,
                    'd_img': d_img,
                    'class': s_class,
                    'type': 'success'
                }
                data.append(info)
                print("  image {}, class: {}".format(len(data), s_class))

                if self.side == 'BOTTOM':
                    save_path = join(FAST_PATH, 'b_success',
                                     'data_{}.pkl'.format(rc))
                else:
                    save_path = join(FAST_PATH, 't_success',
                                     'data_{}.pkl'.format(rc))
                with open(save_path, 'w') as f:
                    pickle.dump(data, f)

            # Kill the script and re-position HSR to get diversity in camera views.
            if (cur_recording[1] < -0.1 and self.true_count % 20 == 0):
                print("ROLLOUT DONE (cur_recording: {})".format(cur_recording))
                print("Length is {}. See our saved pickle files.".format(
                    len(data)))
                sys.exit()

            # Necessary, otherwise we'd save 3-4 times per click.
            self.true_count += 1

    def save_image(self, c_img, d_img, success_class=None):
        """Save images. Don't forget to process depth images.

        For now I'm using a tuned cutoff like 1400, at least to _visualize_.
        NOTE: since the cutoff for turning depth images into black may change,
        it would be better to save the original d_img in a dictionary. Don't use
        cv2.imwrite() as I know from experience that it won't work as desired.
        """
        rc = str(self.r_count).zfill(3)
        f_rc_grasp = 'frame_{}_{}.png'.format(rc,
                                              str(self.grasp_count).zfill(2))
        f_rc_success = 'frame_{}_{}_class_{}.png'.format(
            rc,
            str(self.success_count).zfill(2), success_class)
        if np.isnan(np.sum(d_img)):
            cv2.patchNaNs(d_img, 0.0)
        d_img = depth_to_net_dim(d_img, cutoff=1400)  # for visualization only

        if self.side == "BOTTOM":
            if self.grasp:
                pth1 = join(FAST_PATH, 'b_grasp', 'rgb_' + f_rc_grasp)
                pth2 = join(FAST_PATH, 'b_grasp', 'depth_' + f_rc_grasp)
            else:
                pth1 = join(FAST_PATH, 'b_success', 'rgb_' + f_rc_success)
                pth2 = join(FAST_PATH, 'b_success', 'depth_' + f_rc_success)
        else:
            if self.grasp:
                pth1 = join(FAST_PATH, 't_grasp', 'rgb_' + f_rc_grasp)
                pth2 = join(FAST_PATH, 't_grasp', 'depth_' + f_rc_grasp)
            else:
                pth1 = join(FAST_PATH, 't_success', 'rgb_' + f_rc_success)
                pth2 = join(FAST_PATH, 't_success', 'depth_' + f_rc_success)
        cv2.imwrite(pth1, c_img)
        cv2.imwrite(pth2, d_img)
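
The comments in collect_data_grasp_only spell out how get_record_actions_passive maps the controller's face buttons onto a two-element vector (Y = [1, 0], X = [-1, 0], A = [0, 1], B = [0, -1]). A small decoding sketch, assuming the mapping is exactly as those comments describe (hypothetical helper, not part of the original code):

def decode_record_action(rec, thresh=0.1):
    # rec: two-element vector from get_record_actions_passive().
    if rec[0] < -thresh:
        return 'X'   # snap a photo / success label (class 0)
    if rec[0] > thresh:
        return 'Y'   # failure label (class 1)
    if rec[1] < -thresh:
        return 'B'   # end the rollout
    if rec[1] > thresh:
        return 'A'
    return None      # nothing pressed

print(decode_record_action([-1, 0]))  # 'X'
print(decode_record_action([0, -1]))  # 'B'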
Example #15
class Collect_Demos():
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()

    def process_state(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        cur_action, noise_action, time_pub = self.joystick.apply_control()

        state = self.com.format_data(img_rgb, img_depth)

        #cv2.imwrite('frame_'+str(self.count)+'.png',state[0])
        #Save all data
        data = {}
        data['action'] = cur_action
        data['color_img'] = state[0]
        data['depth_img'] = state[1]
        data['noise_action'] = noise_action

        pose = self.whole_body.get_end_effector_pose().pos
        pose = np.array([pose.x, pose.y, pose.z])
        data['robot_pose'] = pose
        # data['action_time'] = time_pub
        # data['image_time'] = self.cam.color_time_stamped

        print "ACTION AT COUNT ", self.count
        print cur_action
        self.count += 1
        self.trajectory.append(data)
        # if(LA.norm(cur_action) > 1e-3):
        # 	print "GOT ACCEPTED"
        # 	self.trajectory.append(data)

    def check_success(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        success = self.b_d.detect_bottle(img_rgb, img_depth)

        print "BOTTLE FOUND ", success

        return success

    def run(self):

        self.joystick.apply_control()
        cur_recording = self.joystick.get_record_actions_passive()

        if (cur_recording[0] < -0.1):
            print "BEGIN DATA COLLECTION"
            self.start_recording = True
        count = 0

        if (self.start_recording):
            self.count = 0
            self.trajectory = []
            while not self.stop_recording:
                #while count < 20:

                if (self.cam.is_updated):
                    self.process_state()
                    self.cam.is_updated = False

                cur_recording = self.joystick.get_record_actions()
                if (cur_recording[1] < -0.1):
                    print "END DATA COLLECTION"
                    self.stop_recording = True

                count += 1

            self.check_success()
            q = raw_input('SAVE DATA: y or n: ')

            if (q == 'y'):
                self.com.save_recording(self.trajectory)

            self.start_recording = False
            self.stop_recording = False