Example 1
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool
            Whether to display a training set point for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.count = 425

        self.joystick = JoyStick_X(self.com)
        self.true_count = 0
Example 2
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool
            Whether to display a training set point for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.suction = Suction(self.gp, self.cam)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example 3
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool
            Whether to display a training set point for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.count = 425

        self.joystick = JoyStick_X(self.com)
        self.true_count = 0

    def collect_data(self):

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            c_img, d_img = self.com.format_data(c_img, d_img)

            cv2.imshow('video_feed', c_img)
            cv2.waitKey(30)

            cur_recording = self.joystick.get_record_actions_passive()
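            # Snap a frame only when the first record action reads below -0.1, throttled to every 5th loop iteration.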
            if (cur_recording[0] < -0.1 and self.true_count % 5 == 0):
                print "PHOTO SNAPPED " + str(self.count)
                cv2.imwrite(
                    cfg.IMAGE_PATH + '/frame_' + str(self.count) + '.png',
                    c_img)
                self.count += 1
            self.true_count += 1
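
A minimal usage sketch for the data-collection loop above. The __main__ guard is an assumption and is not part of the original file; it presumes that constructing CardPicker (which calls hsrb_interface.Robot()) is enough to bring up the ROS connection:

if __name__ == '__main__':
    picker = CardPicker()      # hypothetical entry point: connects to the HSR, the RGBD camera, and the joystick
    picker.collect_data()      # loops forever, writing a frame to cfg.IMAGE_PATH on each joystick trigger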
Example 4
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool
            Whether to display a training set point for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.whole_body = self.robot.get('whole_body')

        #self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        #self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        #self.suction = Suction(self.gp,self.cam,self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes and returns the mean of the non-zero depth values in d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def card_pick(self):

        while True:

            cur_recording = self.joystick.get_record_actions_passive()
            self.broadcast_transform()

            if (cur_recording[0] < -0.1):

                self.go_to_centroid(self.whole_body)

                #self.com.go_to_initial_state(self.whole_body)

    def broadcast_transform(self):

        try:
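            # Publish a child frame 'transform_ar_marker' offset -0.02 m along z and rolled -0.785 rad about x, relative to ar_marker/11.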
            self.br.sendTransform(
                (0.0, 0.0, -0.02),
                tf.transformations.quaternion_from_euler(ai=-0.785,
                                                         aj=0.0,
                                                         ak=0.0),
                rospy.Time.now(), 'transform_ar_marker', 'ar_marker/11')
        except:
            rospy.logerr('ar marker not found')

    def go_to_centroid(self, whole_body):

        whole_body.end_effector_frame = 'hand_l_finger_vacuum_frame'
        nothing = True

        #self.whole_body.move_to_neutral()

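        # Drive the vacuum end effector to a pose 0.02 m along -z of ar_marker/11, rotated -0.785 rad about x.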
        whole_body.move_end_effector_pose(geometry.pose(z=-0.02, ei=-0.785),
                                          'ar_marker/11')
        #whole_body.move_end_effector_by_line((0,0,1),0.02)
        #self.start()

        #whole_body.move_to_joint_positions({'arm_lift_joint':0.23})

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            print transform
            if 'card' in transform:
                print 'got here'
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

                return True, cards
        # except:
        return False, []
Example 5
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool
            Whether to display a training set point for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        self.suction = Suction(self.gp, self.cam, self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes and returns the mean of the non-zero depth values in d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def find_card(self):
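        # Tuck the arm and tilt the head down, then keep commanding a head pan while polling TF for a card frame.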

        self.whole_body.move_to_neutral()
        self.whole_body.move_to_joint_positions({
            'arm_flex_joint': -1.57,
            'head_tilt_joint': -.785
        })
        print self.check_card_found()[0]
        i = 0
        while True:
            print "panning ", i
            self.whole_body.move_to_joint_positions({'head_pan_joint': .30})
            i += 1
            if self.check_card_found()[0]:
                break
        print "found card!"
        self.card_pick()

    def card_pick(self):

        while True:
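            # Grab the latest RGB-D pair, run the detector on the cropped color image, and wait for a joystick trigger before attempting a grasp.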

            c_img = self.cam.read_color_data()
            c_img_c = np.copy(c_img)
            cv2.imshow('debug_true', c_img_c)
            cv2.waitKey(30)
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:
                c_img_cropped, d_img = self.com.format_data(
                    np.copy(c_img), d_img)

                data = self.detector.numpy_detector(np.copy(c_img_cropped))

                cur_recording = self.joystick.get_record_actions_passive()
                self.suction.find_pick_region_net(data, c_img, d_img, c_img_c)
                if (cur_recording[0] < -0.1):

                    card_found, cards = self.check_card_found()

                    if (card_found):
                        self.suction.execute_grasp(cards, self.whole_body)

                        self.com.go_to_initial_state(self.whole_body)

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            #print transform
            if 'card' in transform:
                print 'got here'
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

                return True, cards
        # except:
        return False, []
Example 6
class CardPicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool
            Whether to display a training set point for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.suction = Suction(self.gp, self.cam)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes and returns the mean of the non-zero depth values in d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def card_pick(self):

        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:

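                # Query a label for the current color image (QueryLabeler presumably pops an annotation UI), then pass it to the suction pick-region finder.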
                self.ql = QueryLabeler()
                self.ql.run(c_img)
                data = self.ql.label_data
                del self.ql

                self.suction.find_pick_region(data, c_img, d_img)

                # card_found,cards = self.check_card_found()

                # if(card_found):
                #     self.suction.execute_grasp(cards,self.whole_body)

                # self.com.go_to_initial_state(self.whole_body)

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            print transform
            if 'card' in transform:
                print 'got here'
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

        # Report success only if at least one card frame was actually found.
        return len(cards) > 0, cards
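
The commented-out try/except in check_card_found suggests the TF lookup can fail when a card frame drops out between getFrameStrings() and lookupTransform(). A sketch of how that guard might be restored, using the standard tf exception types; this is an assumption about the intended error handling, not code from the original file:

    def check_card_found(self):
        transforms = self.tl.getFrameStrings()
        cards = []
        for transform in transforms:
            if 'card' in transform:
                try:
                    # Confirm the card frame is still resolvable in the camera frame.
                    self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                            transform, rospy.Time(0))
                except (tf.LookupException, tf.ConnectivityException,
                        tf.ExtrapolationException):
                    continue
                cards.append(transform)
        return len(cards) > 0, cards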