Example #1
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
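        # give the RGBD subscribers a few seconds to receive camera_info
        # before it is handed to the bottle detector below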
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()
Example #2
	def __init__(self,features,user_name = None):
		
		self.com = COM()
		self.robot = hsrb_interface.Robot()

		self.noise = 0.1
		self.features = features  # self.com.binary_image
		self.count = 0
	
		if user_name is not None:
			self.com.Options.setup(self.com.Options.root_dir,user_name)

		

		self.omni_base = self.robot.get('omni_base')
		self.whole_body = self.robot.get('whole_body')
		self.gripper = self.robot.get('gripper')

		self.com.go_to_initial_state(self.whole_body,self.gripper)

		self.joystick = JoyStick_X(self.com)

		self.policy = Policy(self.com,self.features)

		self.com.change_grip(self.gripper)
Example #3
    def __init__(self, user_name=None):
        self.noise = 0.1

        self.start_recording = False
        self.stop_recording = False

        self.com = COM(load_net=True)

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        self.com.load_net()

        self.options = self.com.Options

        self.features = Features()
Example #4
    def __init__(self):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        self.com.go_to_initial_state(self.whole_body, self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.cam = RGBD()

        self.joystick = JoyStick()
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()
Example #5
    if args.last is not None:
        last = args.last
    else:
        print "please enter a last value with -l (not inclusive)"
        sys.exit()

    f = []
    for (dirpath, dirnames, filenames) in os.walk(
            Options.rollouts_dir):  #specific: sup_dir from specific options
        print dirpath
        print filenames
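        # collect the rollout directory names; each one holds a rollout.p pickle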
        f.extend(dirnames)

    train_data = []
    test_data = []
    com = COM()

    image_data = []
    count = 0
    for filename in f:

        rollout_data = pickle.load(
            open(Options.rollouts_dir + filename + '/rollout.p', 'r'))

        state = rollout_data[0]

        img = state['color_img']

        com.depth_state(state)

        height, width, layers = img.shape
Example #6
class FindObject():

	def __init__(self,features,user_name = None):
		
		self.com = COM()
		self.robot = hsrb_interface.Robot()

		self.noise = 0.1
		self.features = features  # self.com.binary_image
		self.count = 0
	
		if user_name is not None:
			self.com.Options.setup(self.com.Options.root_dir,user_name)

		

		self.omni_base = self.robot.get('omni_base')
		self.whole_body = self.robot.get('whole_body')
		self.gripper = self.robot.get('gripper')

		self.com.go_to_initial_state(self.whole_body,self.gripper)

		self.joystick = JoyStick_X(self.com)

		self.policy = Policy(self.com,self.features)

		self.com.change_grip(self.gripper)

	def run(self):

		if(self.policy.cam.is_updated):
			self.com.load_net()
			self.policy.rollout()
			self.com.clean_up()

			self.policy.cam.is_updated = False
			self.count += 1
		

	def check_initial_state(self):

		go = False

		while not go:
			img_rgb = self.policy.cam.read_color_data()
			img_depth = self.policy.cam.read_depth_data()
			
			state = self.com.format_data(img_rgb,img_depth)
			cv2.imshow('initial_state', state[0] )
			cv2.waitKey(30)
			self.joystick.apply_control()
			cur_recording = self.joystick.get_record_actions()
			if(cur_recording[1] < -0.1):
				print "BEGIN ROLLOUT"
				go = True


	def go_to_initial_state(self):
		self.com.go_to_initial_state(self.whole_body,self.gripper)


	def check_success(self):

		self.com.clean_up()

		self.b_d = Bottle_Detect()

		img_rgb = self.policy.cam.read_color_data()

		success = self.b_d.detect_bottle(img_rgb)

		self.b_d.clean_up()

		print "BOTTLE FOUND ",success

		return success

	def save_data(self,img_rgb,img_detect):

		state = self.com.format_data(img_rgb,None)

		state['found_object'] = img_detect
		state['object_poses'] = []  # assumed placeholder: the source had a bare key access here with no assignment

	def check_marker(self):

		try: 
			A = self.tl.lookupTransform('head_l_stereo_camera_frame','ar_marker/12', rospy.Time(0))
		except: 
			rospy.logerr('trash not found')
			return False

		return True

	def execute_grasp(self):

		self.com.grip_open(self.gripper)
		
		self.whole_body.end_effector_frame = 'hand_palm_link'
		nothing = True
		
		try:
			self.whole_body.move_end_effector_pose(geometry.pose(y=0.15,z=-0.11, ek=-1.57),'ar_marker/9')
			nothing = False
		except:
			rospy.logerr('mustard bottle found')


		self.com.grip_squeeze(self.gripper)

		self.whole_body.move_end_effector_pose(geometry.pose(z=-0.9),'hand_palm_link')

		self.com.grip_open(self.gripper)
		self.com.grip_squeeze(self.gripper)
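
A minimal driver for the FindObject class above might look like the sketch below. It is illustrative only: the feature extractor, the user name, and the loop structure are assumptions, and the imports are taken to be the same ones the surrounding examples already rely on.

# Hypothetical driver for Example #6 (assumed usage, not from the source).
if __name__ == '__main__':
    import rospy

    features = Features()  # assumed: the feature extractor from Examples #3 and #10
    task = FindObject(features, user_name='test_user')

    task.check_initial_state()       # block until the operator triggers a rollout
    while not rospy.is_shutdown():
        task.run()                   # one policy rollout per camera update
        if task.check_success():     # run the bottle detector on the current image
            break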
Example #7
        sys.exit()


    Options.setup(Options.root_dir,user_name)


   
    f = []
    for (dirpath, dirnames, filenames) in os.walk(Options.rollouts_dir): #specific: sup_dir from specific options
        print dirpath
        print filenames
        f.extend(dirnames)

    train_data = []
    test_data = []
    com = COM()

    for filename in f:
        if( filename == 'rollout42'):
       
            rollout_data = pickle.load(open(Options.rollouts_dir+filename+'/rollout.p','r'))

            state = rollout_data[0]
            
            img = state['color_img']

            com.depth_state(state)

            height,width,layers = img.shape
          
            a = cv2.cv.CV_FOURCC('M','J','P','G')
Example #8
class Collect_Demos():
    def __init__(self):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        self.com.go_to_initial_state(self.whole_body, self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.cam = RGBD()

        self.joystick = JoyStick()
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()

    def process_state(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        cur_action = self.joystick.get_current_control()

        state = self.com.format_data(img_rgb, img_depth)

        cv2.imshow('debug', state[0])

        cv2.waitKey(30)
        #Save all data
        data = {}
        data['action'] = cur_action
        data['color_img'] = state[0]
        data['depth_img'] = state[1]
        data['noisey_twist'] = self.joystick.get_current_twist()
        data['gripper_torque'] = self.torque.read_data()
        data['joint_positions'] = self.joints.read_data()

        if (LA.norm(cur_action) > 1e-4):
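            # record only frames where the operator actually commanded motion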
            self.trajectory.append(data)

    def run(self):
        cur_recording = self.joystick.get_record_actions()

        if (cur_recording[0] < -0.1):
            print "BEGIN DATA COLLECTION"
            self.start_recording = True
        count = 0

        if (self.start_recording):
            self.trajectory = []
            while not self.stop_recording:
                #while count < 20:

                if (count % 6 == 0):
                    self.process_state()

                cur_recording = self.joystick.get_record_actions()
                if (cur_recording[1] < -0.1):
                    print "END DATA COLLECTION"
                    self.stop_recording = True

                count += 1

            q = raw_input('SAVE DATA: y or n: ')  # raw_input: Python 2 input() would eval the reply

            if (q == 'y'):
                self.com.save_recording(self.trajectory)

            self.start_recording = False
            self.stop_recording = False
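
Collect_Demos is polled from outside the class; one plausible main loop is sketched below. The bare while loop and the polling interval are assumptions for illustration, not code from the source.

# Hypothetical driver for Example #8 (assumed usage, not from the source).
if __name__ == '__main__':
    import time

    demo_collector = Collect_Demos()
    while True:
        # run() checks the joystick; recording starts and stops on the mapped buttons
        demo_collector.run()
        time.sleep(0.05)  # assumed polling interval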
Example #9
class FindObject():

	def __init__(self,features,user_name = None):
		
		self.com = COM()
		self.robot = hsrb_interface.Robot()

		self.noise = 0.1
		self.features = features  # self.com.binary_image
		self.count = 0
	
		if user_name is not None:
			self.com.Options.setup(self.com.Options.root_dir,user_name)

		

		self.omni_base = self.robot.get('omni_base')
		self.whole_body = self.robot.get('whole_body')
		self.gripper = self.robot.get('gripper')

		#self.com.go_to_initial_state(self.whole_body,self.gripper)

		self.joystick = JoyStick_X(self.com)
		self.tl = TransformListener() 

		self.policy = Policy(self.com,self.features)

	def run(self):

		if(self.policy.cam.is_updated):
			self.com.load_net()
			self.policy.rollout()
			self.com.clean_up()

			self.policy.cam.is_updated = False
			self.count += 1
		

	def check_initial_state(self):

		go = False
		sticky = True
		while  sticky: 
			self.joystick.apply_control()
			cur_recording = self.joystick.get_record_actions()
			if(cur_recording[1] > -0.1):
				print "BEGIN ROLLOUT"
				sticky  = False


		while not go:
			img_rgb = self.policy.cam.read_color_data()
			img_depth = self.policy.cam.read_depth_data()
			
			state = self.com.format_data(img_rgb,img_depth)
			cv2.imshow('initial_state', state[0] )
			cv2.waitKey(30)
			self.joystick.apply_control()
			cur_recording = self.joystick.get_record_actions()
			if(cur_recording[1] < -0.1):
				print "BEGIN ROLLOUT"
				go = True


	def go_to_initial_state(self):
		self.com.go_to_initial_state(self.whole_body,self.gripper)


	def clear_data(self):
		self.trajectory = []

	def mark_success(self,q):

		state = {}
		if(q == 'y'):
			state['success'] = 1
		else:
			state['success'] = 0

		self.trajectory.append(state)

	def is_goal_object(self):

		
		try:
		
			full_pose = self.tl.lookupTransform('head_l_stereo_camera_frame','ar_marker/9', rospy.Time(0))
			trans = full_pose[0]

			transforms = self.tl.getFrameStrings()
			poses = []


			

			for transform in transforms:
				if 'bottle' in transform:
					f_p = self.tl.lookupTransform('head_rgbd_sensor_link',transform, rospy.Time(0))
					poses.append(f_p[0])
					print 'augmented pose ',f_p
					print 'ar_marker ', trans


			for pose in poses:
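				# a detection counts as the goal if its z coordinate, minus a fixed
				# 10 cm offset, lands within 3 cm of the AR marker's z coordinate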
				
				if(LA.norm(pose[2]-0.1-trans[2])< 0.03):
					return True
		except: 
			rospy.logerr('AR MARKER NOT THERE')

		return False


	def check_success(self):

		self.com.clean_up()

		self.b_d = Bottle_Detect(self.policy.cam.read_info_data())

		img_rgb = self.policy.cam.read_color_data()
		img_depth = self.policy.cam.read_depth_data()

		s_obj,img_detect,poses = self.b_d.detect_bottle(img_rgb,img_depth)

		success = self.is_goal_object()

		self.b_d.clean_up()

		self.process_data(img_rgb,img_detect,poses,success)



		print "BOTTLE FOUND ",success

		return success

	def process_data(self,img_rgb,img_detect,object_poses,success):

		img_rgb_cr,img_d = self.com.format_data(img_rgb,None)

		state = {}
		state['color_img'] = img_rgb_cr
		state['found_object'] = img_detect
		state['object_poses'] = object_poses
		state['was_success'] = success

		self.trajectory.append(state)

	def save_data(self):

		self.com.save_evaluation(self.trajectory)


	def check_marker(self):

		try: 
			A = self.tl.lookupTransform('head_l_stereo_camera_frame','ar_marker/9', rospy.Time(0))
		except: 
			rospy.logerr('trash not found')
			return False

		return True


	def execute_grasp(self):

		self.com.grip_open(self.gripper)
		self.whole_body.end_effector_frame = 'hand_palm_link'
		nothing = True
		
		try:
			self.whole_body.move_end_effector_pose(geometry.pose(y=0.15,z=-0.09, ek=-1.57),'ar_marker/9')
			
		except:
			rospy.logerr('mustard bottle found')

		self.com.grip_squeeze(self.gripper)

		self.whole_body.move_end_effector_pose(geometry.pose(z=-0.9),'hand_palm_link')

		self.com.grip_open(self.gripper)
		self.com.grip_squeeze(self.gripper)
Example #10
class Compute_Noise():
    def __init__(self, user_name=None):
        self.noise = 0.1

        self.start_recording = False
        self.stop_recording = False

        self.com = COM(load_net=True)

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        self.com.load_net()

        self.options = self.com.Options

        self.features = Features()

    def get_test_train_split(self):
        train_labels, test_labels = pickle.load(
            open(self.options.stats_dir + 'test_train_f.p', 'r'))
        self.test_trajs = []

        for filename in test_labels:
            rollout_data = pickle.load(
                open(self.options.rollouts_dir + filename + '/rollout.p', 'r'))

            self.test_trajs.append(rollout_data)

    def compute_covariance_matrix(self):

        self.covariance = np.zeros([3, 3])
        self.get_test_train_split()
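
        # Empirical covariance of the policy residual (action - eval_policy(img)):
        # each held-out trajectory contributes the mean of its rank-one residual
        # outer products, and those per-trajectory estimates are then averaged
        # over the N test trajectories.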

        N = len(self.test_trajs)

        for traj in self.test_trajs:

            T = float(len(traj))
            t_covariance = np.zeros([3, 3])
            for state in traj:
                img = state['color_img']
                action = state['action']

                action_ = self.com.eval_policy(img,
                                               self.features.vgg_features,
                                               cropped=True)
                action_f = np.zeros([3, 1])
                action_f[:, 0] = (action - action_)

                rank_one_update = action_f * action_f.T

                #IPython.embed()

                t_covariance = t_covariance + rank_one_update

            self.covariance = t_covariance * (1.0 / T) + self.covariance

        self.covariance = self.covariance * (1.0 / N)
        print "COMPUTED COVARIANCE MATRIX"
        print self.covariance

    def save_covariance_matrix(self):

        pickle.dump(self.covariance,
                    open(self.options.stats_dir + 'cov_matrix.p', 'wb'))
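
The source does not show how the saved covariance is consumed. One plausible use, consistent with the inject_noise and noise_scale options of JoyStick_X in Examples #1 and #12, is to draw zero-mean Gaussian action noise from it. The sketch below is written under that assumption; the function name, the path argument, and the scaling are all hypothetical.

# Hypothetical sketch (assumed usage, not shown in the source).
import pickle

import numpy as np


def sample_action_noise(cov_path, noise_scale=1.0):
    # load the 3x3 covariance written by save_covariance_matrix()
    with open(cov_path, 'rb') as f:
        cov = pickle.load(f)
    # zero-mean Gaussian noise with the measured covariance, scaled like noise_scale in JoyStick_X
    return noise_scale * np.random.multivariate_normal(np.zeros(3), cov)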
Example #11
from il_ros_hsr.p_pi.safe_corl.com import Safe_COM as COM
import rospy
import time
#specific: fetches specific net file
#from deep_lfd.tensor.nets.net_grasp import Net_Grasp as Net 
########################################################


if __name__ == '__main__':

    
    robot = hsrb_interface.Robot()

    Options = options()
    yolo_detect = YoloDetect(Options)
    com = COM()

    omni_base = robot.get('omni_base')
    whole_body = robot.get('whole_body')
    gripper = robot.get('gripper')

    #com.go_to_initial_state(whole_body,gripper)
    cam = RGBD()
    time.sleep(3)

    while True:
        c_img = cam.read_color_data()
        d_img = cam.read_depth_data()


Example #12
class Collect_Demos():
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()

    def process_state(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        cur_action, noise_action, time_pub = self.joystick.apply_control()

        state = self.com.format_data(img_rgb, img_depth)

        #cv2.imwrite('frame_'+str(self.count)+'.png',state[0])
        #Save all data
        data = {}
        data['action'] = cur_action
        data['color_img'] = state[0]
        data['depth_img'] = state[1]
        data['noise_action'] = noise_action

        pose = self.whole_body.get_end_effector_pose().pos
        pose = np.array([pose.x, pose.y, pose.z])
        data['robot_pose'] = pose
        # data['action_time'] = time_pub
        # data['image_time'] = self.cam.color_time_stamped

        print "ACTION AT COUNT ", self.count
        print cur_action
        self.count += 1
        self.trajectory.append(data)
        # if(LA.norm(cur_action) > 1e-3):
        # 	print "GOT ACCEPTED"
        # 	self.trajectory.append(data)

    def check_success(self):

        img_rgb = self.cam.read_color_data()
        img_depth = self.cam.read_depth_data()

        success = self.b_d.detect_bottle(img_rgb, img_depth)

        print "BOTTLE FOUND ", success

        return success

    def run(self):

        self.joystick.apply_control()
        cur_recording = self.joystick.get_record_actions_passive()

        if (cur_recording[0] < -0.1):
            print "BEGIN DATA COLLECTION"
            self.start_recording = True
        count = 0

        if (self.start_recording):
            self.count = 0
            self.trajectory = []
            while not self.stop_recording:
                #while count < 20:

                if (self.cam.is_updated):
                    self.process_state()
                    self.cam.is_updated = False

                cur_recording = self.joystick.get_record_actions()
                if (cur_recording[1] < -0.1):
                    print "END DATA COLLECTION"
                    self.stop_recording = True

                count += 1

            self.check_success()
            q = raw_input('SAVE DATA: y or n: ')  # raw_input: Python 2 input() would eval the reply

            if (q == 'y'):
                self.com.save_recording(self.trajectory)

            self.start_recording = False
            self.stop_recording = False