Example #1
def run():

    tt = tf.Transformer(True, rospy.Duration(10.0))

    tfl = TransformListener()
    #print tf.allFramesAsString()
    print(tfl.getFrameStrings())
    if tfl.frameExists("/base_link") and tfl.frameExists("/base_footprint"):
        t = tfl.getLatestCommonTime("/base_link", "/base_footprint")
        position, quaternion = tfl.lookupTransform("/base_link",
                                                   "/base_footprint", t)
        print(position, quaternion)
    else:
        print("no")
class StateRepublisher:
    STATE_TOPIC = '/state'
    LOCAL_STATE_TOPIC = '/state_local'

    def __init__(self):
        rospy.init_node('state_republisher')
        self._pub_pose = {}
        self._pub_twist = {}
        self.listener = TransformListener()

        for d in utils.get_axes():
            self._pub_pose[d] = rospy.Publisher(utils.get_pose_topic(d),
                                                Float64,
                                                queue_size=3)
            self._pub_twist[d] = rospy.Publisher(utils.get_twist_topic(d),
                                                 Float64,
                                                 queue_size=3)

        self._pub_local_state = rospy.Publisher(self.LOCAL_STATE_TOPIC,
                                                Odometry,
                                                queue_size=3)

        rospy.Subscriber(self.STATE_TOPIC, Odometry, self.receive_odometry)
        rospy.spin()

    def receive_odometry(self, odometry):
        if 'base_link' in self.listener.getFrameStrings():
            local_pose = utils.transform_pose(self.listener, 'odom',
                                              'base_link', odometry.pose.pose)

            utils.publish_data_dictionary(self._pub_pose, utils.get_axes(),
                                          utils.parse_pose(local_pose))

            local_twist = utils.transform_twist(self.listener, 'odom',
                                                'base_link',
                                                odometry.twist.twist)
            utils.publish_data_dictionary(self._pub_twist, utils.get_axes(),
                                          utils.parse_twist(local_twist))

            local_state = Odometry()
            local_state.header.frame_id = 'base_link'
            local_state.pose.pose = local_pose
            local_state.twist.twist = local_twist
            self._pub_local_state.publish(local_state)
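
# utils.transform_pose / utils.transform_twist above are project-specific
# helpers that are not shown. A rough sketch (argument order assumed) of what
# the pose variant could look like on top of tf's TransformListener:
import rospy
from geometry_msgs.msg import PoseStamped

def transform_pose_sketch(listener, source_frame, target_frame, pose):
    stamped = PoseStamped()
    stamped.header.frame_id = source_frame
    stamped.header.stamp = rospy.Time(0)  # Time(0) = latest available
    stamped.pose = pose
    listener.waitForTransform(target_frame, source_frame,
                              rospy.Time(0), rospy.Duration(1.0))
    return listener.transformPose(target_frame, stamped).pose
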
class CardPicker():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.whole_body = self.robot.get('whole_body')

        #self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        #self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        #self.suction = Suction(self.gp,self.cam,self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def card_pick(self):

        while True:

            cur_recording = self.joystick.get_record_actions_passive()
            self.broadcast_transform()

            if (cur_recording[0] < -0.1):

                self.go_to_centroid(self.whole_body)

                #self.com.go_to_initial_state(self.whole_body)

    def broadcast_transform(self):

        try:
            self.br.sendTransform(
                (0.0, 0.0, -0.02),
                tf.transformations.quaternion_from_euler(ai=-0.785,
                                                         aj=0.0,
                                                         ak=0.0),
                rospy.Time.now(), 'transform_ar_marker', 'ar_marker/11')
        except:
            rospy.logerr('ar marker not found')

    def go_to_centroid(self, whole_body):

        whole_body.end_effector_frame = 'hand_l_finger_vacuum_frame'
        nothing = True

        #self.whole_body.move_to_neutral()

        whole_body.move_end_effector_pose(geometry.pose(z=-0.02, ei=-0.785),
                                          'ar_marker/11')
        #whole_body.move_end_effector_by_line((0,0,1),0.02)
        #self.start()

        #whole_body.move_to_joint_positions({'arm_lift_joint':0.23})

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            print(transform)
            if 'card' in transform:
                print('got here')
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

                return True, cards
        # except:
        return False, []
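
# check_card_found() above scans getFrameStrings() once per call. A small
# helper sketch (editor's addition) that polls for a matching frame with a
# timeout, so callers can block until the AR/card frame is published:
import rospy

def wait_for_frame(listener, substring, timeout=5.0):
    deadline = rospy.Time.now() + rospy.Duration(timeout)
    rate = rospy.Rate(10)
    while rospy.Time.now() < deadline and not rospy.is_shutdown():
        matches = [f for f in listener.getFrameStrings() if substring in f]
        if matches:
            return matches
        rate.sleep()
    return []
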
Example #4
class BedMaker():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def bed_pick(self):

        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:

                c_img = self.cam.read_color_data()
                d_img = self.cam.read_depth_data()

                data = self.wl.label_image(c_img)

                self.gripper.find_pick_region_labeler(data, c_img, d_img,
                                                      self.grasp_count)

                pick_found, bed_pick = self.check_card_found()

                self.grasp_count += 1

                if pick_found:
                    if self.side == 'BOTTOM':
                        self.gripper.execute_grasp(bed_pick, self.whole_body,
                                                   'head_down')
                        success = self.sc.check_bottom_success(self.wl)

                        print("WAS SUCCESSFUL:")
                        print(success)
                        if success:
                            self.move_to_top_side()
                            self.side = "TOP"

                    elif self.side == "TOP":
                        self.gripper.execute_grasp(bed_pick, self.whole_body,
                                                   'head_up')
                        success = self.sc.check_top_success(self.wl)

                        print("WAS SUCCESSFUL:")
                        print(success)

                        if success:
                            self.side = "PILLOW"

    def test_current_point(self):

        self.gripper.tension.force_pull(self.whole_body, (0, 1, 0))
        self.gripper.com.grip_open(self.gripper)
        self.move_to_top_side()

    def move_to_top_side(self):

        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'right_mid')

        self.tt.move_to_pose(self.omni_base, 'right_up')

        self.tt.move_to_pose(self.omni_base, 'top_mid')

    def check_bottom_success(self):

        self.tt.move_to_pose(self.omni_base, 'lower_mid')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        try:

            for transform in transforms:
                print(transform)
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('got here')
                    f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                                  transform, rospy.Time(0))
                    cards.append(transform)

        except:
            rospy.logerr('bed pick not found yet')

        return True, cards
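
# The image guards in these examples must use identity checks: with NumPy
# arrays, `img == None` broadcasts elementwise and `not img` raises
# ValueError for multi-element arrays. A tiny demonstration of the safe form:
import numpy as np

def images_ready(c_img, d_img):
    # "no frame yet" is signalled with None, so test identity, not equality
    return c_img is not None and d_img is not None

assert images_ready(np.zeros((480, 640, 3)), np.zeros((480, 640)))
assert not images_ready(None, None)
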
Example #5
class BedMaker():
    def __init__(self, args):
        """For deploying the bed-making policy, not for data collection.

        We use all three variants (analytic, human, networks) here due to
        similarities in code structure.
        """
        self.args = args
        DEBUG = True

        # Set up the robot.
        self.robot = robot = hsrb_interface.Robot()
        if DEBUG:
            print("finished: hsrb_interface.Robot()...")
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        if DEBUG:
            print("finished: robot.get(omni_base)...")
        self.whole_body = self.robot.get('whole_body')
        if DEBUG:
            print("finished: robot.get(whole_body)...")
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # Set up initial state, table, etc. Don't forget view mode!
        self.view_mode = BED_CFG.VIEW_MODE
        self.com.go_to_initial_state(self.whole_body)
        if DEBUG:
            print("finished: go_to_initial_state() ...")
        self.tt = TableTop()
        if DEBUG:
            print("finished: TableTop()...")

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0
        self.b_grasp_count = 0
        self.t_grasp_count = 0

        # AH, build the YOLO network beforehand.
        g_cfg = BED_CFG.GRASP_CONFIG
        s_cfg = BED_CFG.SUCC_CONFIG
        self.yc = YOLO_CONV(options=s_cfg)
        self.yc.load_network()

        # Policy for grasp detection, using Deep Imitation Learning.
        # Or, actually, sometimes we will use humans or an analytic version.
        if DEBUG:
            self._test_variables()
        print("\nnow forming the GDetector with type {}".format(args.g_type))
        if args.g_type == 'network':
            self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc)
        elif args.g_type == 'analytic':
            self.g_detector = Analytic_Grasp()  # TODO not implemented!
        elif args.g_type == 'human':
            print("Using a human, don't need to have a `g_detector`. :-)")

        if DEBUG:
            self._test_variables()
            print("\nnow making success net")
        self.sn = Success_Net(self.whole_body,
                              self.tt,
                              self.cam,
                              self.omni_base,
                              fg_cfg=s_cfg,
                              bed_cfg=BED_CFG,
                              yc=self.yc)

        # Bells and whistles.
        self.br = TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.dp = DrawPrediction()

        # When we start, do rospy.spin() to check the frames (phase 1). Then re-run.
        # The current hack we have to get around crummy AR marker detection. :-(
        if DEBUG:
            self._test_variables()
        print("Finished with init method")
        time.sleep(4)
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

        # For evaluating coverage.
        self.image_start = None
        self.image_final = None
        self.image_start2 = None
        self.image_final2 = None

        # For grasp offsets.
        self.apply_offset = False

    def bed_make(self):
        """Runs the pipeline for deployment, testing out bed-making.
        """
        # Get the starting image (from USB webcam). Try a second as well.
        cap = cv2.VideoCapture(0)
        frame = None
        while frame is None:
            ret, frame = cap.read()
            cv2.waitKey(50)
        self.image_start = frame
        cv2.imwrite('image_start.png', self.image_start)

        _, frame = cap.read()
        self.image_start2 = frame
        cv2.imwrite('image_start2.png', self.image_start2)

        cap.release()
        print(
            "NOTE! Recorded `image_start` for coverage evaluation. Was it set up?"
        )

        def get_pose(data_all):
            # See `find_pick_region_labeler` in `p_pi/bed_making/gripper.py`.
            # It's because from the web labeler, we get a bunch of objects.
            # So we have to compute the pose (x,y) from it.
            res = data_all['objects'][0]
            x_min = float(res['box'][0])
            y_min = float(res['box'][1])
            x_max = float(res['box'][2])
            y_max = float(res['box'][3])
            x = (x_max - x_min) / 2.0 + x_min
            y = (y_max - y_min) / 2.0 + y_min
            return (x, y)

        args = self.args
        use_d = BED_CFG.GRASP_CONFIG.USE_DEPTH
        self.get_new_grasp = True
        self.new_grasp = True
        self.rollout_stats = []  # What we actually save for analysis later

        # Add to self.rollout_stats at the end for more timing info
        self.g_time_stats = []  # for _execution_ of a grasp
        self.move_time_stats = []  # for moving to the other side

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:
                if self.new_grasp:
                    self.position_head()
                else:
                    self.new_grasp = True
                time.sleep(3)

                c_img = self.cam.read_color_data()
                d_img = self.cam.read_depth_data()
                d_img_raw = np.copy(d_img)  # Needed for determining grasp pose

                # --------------------------------------------------------------
                # Process depth images! Helps network, human, and (presumably) analytic.
                # Obviously human can see the c_img as well ... hard to compare fairly.
                # --------------------------------------------------------------
                if use_d:
                    if np.isnan(np.sum(d_img)):
                        cv2.patchNaNs(d_img, 0.0)
                    d_img = depth_to_net_dim(d_img, robot='HSR')
                    policy_input = np.copy(d_img)
                else:
                    policy_input = np.copy(c_img)

                # --------------------------------------------------------------
                # Run grasp detector to get data=(x,y) point for target, record stats.
                # Note that the web labeler returns a dictionary like this:
                # {'objects': [{'box': (155, 187, 165, 194), 'class': 0}], 'num_labels': 1}
                # but we really want just the 2D grasping point. So use `get_pose()`.
                # Also, for the analytic one, we'll pick the highest point ourselves.
                # --------------------------------------------------------------
                sgraspt = time.time()
                if args.g_type == 'network':
                    data = self.g_detector.predict(policy_input)
                elif args.g_type == 'analytic':
                    data_all = self.wl.label_image(policy_input)
                    data = get_pose(data_all)
                elif args.g_type == 'human':
                    data_all = self.wl.label_image(policy_input)
                    data = get_pose(data_all)
                egraspt = time.time()

                g_predict_t = egraspt - sgraspt
                print("Grasp predict time: {:.2f}".format(g_predict_t))
                self.record_stats(c_img, d_img_raw, data, self.side,
                                  g_predict_t, 'grasp')

                # For safety, we can check image and abort as needed before execution.
                if use_d:
                    img = self.dp.draw_prediction(d_img, data)
                else:
                    img = self.dp.draw_prediction(c_img, data)
                caption = 'G Predicted: {} (ESC to abort, other key to proceed)'.format(
                    data)
                call_wait_key(cv2.imshow(caption, img))

                # --------------------------------------------------------------
                # Broadcast grasp pose, execute the grasp, check for success.
                # We'll use the `find_pick_region_net` since the `data` is the
                # (x,y) pose, and not `find_pick_region_labeler`.
                # --------------------------------------------------------------
                self.gripper.find_pick_region_net(
                    pose=data,
                    c_img=c_img,
                    d_img=d_img_raw,
                    count=self.grasp_count,
                    side=self.side,
                    apply_offset=self.apply_offset)
                pick_found, bed_pick = self.check_card_found()

                if self.side == "BOTTOM":
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'lower_start')
                    tic = time.time()
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                    toc = time.time()
                else:
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'top_mid')
                    tic = time.time()
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')
                    toc = time.time()
                self.g_time_stats.append(toc - tic)
                self.check_success_state(policy_input)

    def check_success_state(self, old_grasp_image):
        """
        Checks whether a single grasp in a bed-making trajectory succeeded.
        Depends on which side of the bed the HSR is at. Invokes the learned
        success network policy and transitions the HSR if successful.

        When we record the data, c_img and d_img should be what success net saw.

        UPDATE: now we can pass in the previous `d_img` from the grasping to
        compare the difference. Well, technically the `policy_input` so it can
        handle either case.
        """
        use_d = BED_CFG.SUCC_CONFIG.USE_DEPTH
        if self.side == "BOTTOM":
            result = self.sn.check_bottom_success(use_d, old_grasp_image)
            self.b_grasp_count += 1
        else:
            result = self.sn.check_top_success(use_d, old_grasp_image)
            self.t_grasp_count += 1
        self.grasp_count += 1
        assert self.grasp_count == self.b_grasp_count + self.t_grasp_count

        success = result['success']
        data = result['data']
        c_img = result['c_img']
        d_img = result['d_img']
        d_img_raw = result['d_img_raw']
        s_predict_t = result['s_predict_t']
        img_diff = result['diff_l2']
        img_ssim = result['diff_ssim']
        self.record_stats(c_img, d_img_raw, data, self.side, s_predict_t,
                          'success')

        # We really need a better metric, such as 'structural similarity'.
        # Edit: well, it's probably marginally better, I think.
        # I use an L2 threshold of 98k, and an SSIM threshold of 0.88.

        if BED_CFG.GRASP_CONFIG.USE_DEPTH != BED_CFG.SUCC_CONFIG.USE_DEPTH:
            print("grasp vs success for using depth differ")
            print("for now we'll ignore the offset issue.")
        else:
            print("L2 and SSIM btwn grasp & next image: {:.1f} and {:.3f}".
                  format(img_diff, img_ssim))
            if img_ssim >= 0.875 or img_diff < 85000:
                print("APPLYING OFFSET! (self.apply_offset = True)")
                self.apply_offset = True
            else:
                print("no offset applied (self.apply_offset = False)")
                self.apply_offset = False

        # Have user confirm that this makes sense.
        caption = "Success net saw this and thought: {}. Press any key".format(
            success)
        if use_d:
            call_wait_key(cv2.imshow(caption, d_img))
        else:
            call_wait_key(cv2.imshow(caption, c_img))

        # Limit amount of grasp attempts per side, pretend 'success' anyway.
        lim = BED_CFG.GRASP_ATTEMPTS_PER_SIDE
        if (self.side == 'BOTTOM' and self.b_grasp_count >= lim) or \
                (self.side == 'TOP' and self.t_grasp_count >= lim):
            print("We've hit {} for this side so set success=True".format(lim))
            success = True

        # Handle transitioning to different side
        if success:
            if self.side == "BOTTOM":
                self.transition_to_top()
                self.side = 'TOP'
            else:
                self.transition_to_start()
            print(
                "We're moving to another side so revert self.apply_offset = False."
            )
            self.apply_offset = False
        else:
            self.new_grasp = False

    def transition_to_top(self):
        """Transition to top (not bottom)."""
        transition_time = self.move_to_top_side()
        self.move_time_stats.append(transition_time)

    def transition_to_start(self):
        """Transition to start=bottom, SAVE ROLLOUT STATS, exit program.

        The `rollout_stats` is a list with a bunch of stats recorded via the
        class method `record_stats`. We save with a top-down webcam and save
        before moving back, since the HSR could disconnect.
        """
        # Record the final image for evaluation later (from USB webcam).
        cap = cv2.VideoCapture(0)
        frame = None
        while frame is None:
            ret, frame = cap.read()
        self.image_final = frame
        cv2.imwrite('image_final.png', self.image_final)

        _, frame = cap.read()
        self.image_final2 = frame
        cv2.imwrite('image_final2.png', self.image_final2)

        cap.release()
        print("NOTE! Recorded `image_final` for coverage evaluation.")

        # Append some last-minute stuff to `self.rollout_stats` for saving.
        final_stuff = {
            'image_start': self.image_start,
            'image_final': self.image_final,
            'image_start2': self.image_start2,
            'image_final2': self.image_final2,
            'grasp_times': self.g_time_stats,
            'move_times': self.move_time_stats,
            'args': self.args,  # ADDING THIS! Now we can 'retrace' our steps.
        }
        self.rollout_stats.append(final_stuff)

        # SAVE, move to start, then exit.
        self.com.save_stat(self.rollout_stats, target_path=self.args.save_path)
        self.move_to_start()
        sys.exit()

    def record_stats(self, c_img, d_img, data, side, time, typ):
        """Adds a dictionary to the `rollout_stats` list.

        We can tell it's a 'net' thing due to 'net_pose' and 'net_succ' keys.
        EDIT: argh wish I hadn't done that since this script also handles the
        human and analytic cases. Oh well, too late for that now.
        """
        assert side in ['BOTTOM', 'TOP']
        grasp_point = {}
        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img
        if typ == "grasp":
            grasp_point['net_pose'] = data
            grasp_point['g_net_time'] = time
        elif typ == "success":
            grasp_point['net_succ'] = data
            grasp_point['s_net_time'] = time
        else:
            raise ValueError(typ)
        grasp_point['side'] = side
        grasp_point['type'] = typ
        self.rollout_stats.append(grasp_point)

    def position_head(self):
        """Position head for a grasp.

        Use lower_start_tmp so HSR looks 'sideways'; thus, hand is not in the way.
        """
        self.whole_body.move_to_go()
        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        self.whole_body.move_to_joint_positions(
            {'arm_flex_joint': -np.pi / 16.0})
        self.whole_body.move_to_joint_positions(
            {'head_pan_joint': np.pi / 2.0})
        self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120})
        self.whole_body.move_to_joint_positions(
            {'head_tilt_joint': -np.pi / 4.0})

    def move_to_top_side(self):
        """Assumes we're at the bottom and want to go to the top."""
        self.whole_body.move_to_go()
        tic = time.time()
        self.tt.move_to_pose(self.omni_base, 'right_down_1')
        self.tt.move_to_pose(self.omni_base, 'right_mid_1')
        self.tt.move_to_pose(self.omni_base, 'right_up_1')
        self.tt.move_to_pose(self.omni_base, 'top_mid_tmp')
        toc = time.time()
        return toc - tic

    def move_to_start(self):
        """Assumes we're at the top and we go back to the start.

        Go to lower_start_tmp to be at the same view as we started with, so that
        we take a c_img and compare coverage.
        """
        self.whole_body.move_to_go()
        tic = time.time()
        self.tt.move_to_pose(self.omni_base, 'right_up_2')
        self.tt.move_to_pose(self.omni_base, 'right_mid_2')
        self.tt.move_to_pose(self.omni_base, 'right_down_2')
        self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        toc = time.time()
        return toc - tic

    def check_card_found(self):
        """Looks up the pose for where the HSR's hand should go to."""
        transforms = self.tl.getFrameStrings()
        cards = []
        try:
            for transform in transforms:
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('found {}'.format(current_grasp))
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)
        except:
            rospy.logerr('bed pick not found yet')
        return True, cards

    def _test_grasp(self):
        """Simple tests for grasping. Don't forget to process depth images.

        Do this independently of any rollout ...
        """
        print("\nNow in `test_grasp` to check grasping net...")
        self.position_head()
        time.sleep(3)

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()
        if np.isnan(np.sum(d_img)):
            cv2.patchNaNs(d_img, 0.0)
        d_img = depth_to_net_dim(d_img, robot='HSR')
        pred = self.g_detector.predict(np.copy(d_img))
        img = self.dp.draw_prediction(d_img, pred)

        print("prediction: {}".format(pred))
        caption = 'G Predicted: {} (ESC to abort, other key to proceed)'.format(
            pred)
        cv2.imshow(caption, img)
        key = cv2.waitKey(0)
        if key in ESC_KEYS:
            print("Pressed ESC key. Terminating program...")
            sys.exit()

    def _test_success(self):
        """Simple tests for success net. Don't forget to process depth images.

        Should be done after a grasp test since I don't re-position...  Note: we
        have access to `self.sn` but that isn't the actual net which has a
        `predict`, but it's a wrapper (explained above), but we can access the
        true network via `self.sn.sdect` and from there call `predict`.
        """
        print("\nNow in `test_success` to check success net...")
        time.sleep(3)
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()
        if np.isnan(np.sum(d_img)):
            cv2.patchNaNs(d_img, 0.0)
        d_img = depth_to_net_dim(d_img, robot='HSR')
        result = self.sn.sdect.predict(np.copy(d_img))
        result = np.squeeze(result)

        print("s-net pred: {} (if [0]<[1] failure, else success...)".format(
            result))
        caption = 'S Predicted: {} (ESC to abort, other key to proceed)'.format(
            result)
        cv2.imshow(caption, d_img)
        key = cv2.waitKey(0)
        if key in ESC_KEYS:
            print("Pressed ESC key. Terminating program...")
            sys.exit()

    def _test_variables(self):
        """Test to see if TF variables were loaded correctly.
        """
        vars = tf.trainable_variables()
        print("\ntf.trainable_variables:")
        for vv in vars:
            print("  {}".format(vv))
        print("done\n")
Example #6
class FindObject():

    def __init__(self, features, user_name=None):

        self.com = COM()
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1
        self.features = features  # self.com.binary_image
        self.count = 0

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        self.joystick = JoyStick_X(self.com)
        self.tl = TransformListener()

        self.policy = Policy(self.com, self.features)

    def run(self):

        if self.policy.cam.is_updated:
            self.com.load_net()
            self.policy.rollout()
            self.com.clean_up()

            self.policy.cam.is_updated = False
            self.count += 1

    def check_initial_state(self):

        go = False
        sticky = True
        while sticky:
            self.joystick.apply_control()
            cur_recording = self.joystick.get_record_actions()
            if cur_recording[1] > -0.1:
                print("BEGIN ROLLOUT")
                sticky = False

        while not go:
            img_rgb = self.policy.cam.read_color_data()
            img_depth = self.policy.cam.read_depth_data()

            state = self.com.format_data(img_rgb, img_depth)
            cv2.imshow('initial_state', state[0])
            cv2.waitKey(30)
            self.joystick.apply_control()
            cur_recording = self.joystick.get_record_actions()
            if cur_recording[1] < -0.1:
                print("BEGIN ROLLOUT")
                go = True

    def go_to_initial_state(self):
        self.com.go_to_initial_state(self.whole_body, self.gripper)

    def clear_data(self):
        self.trajectory = []

    def mark_success(self, q):

        state = {}
        if q == 'y':
            state['success'] = 1
        else:
            state['success'] = 0

        self.trajectory.append(state)

    def is_goal_object(self):

        try:
            full_pose = self.tl.lookupTransform('head_l_stereo_camera_frame',
                                                'ar_marker/9', rospy.Time(0))
            trans = full_pose[0]

            transforms = self.tl.getFrameStrings()
            poses = []

            for transform in transforms:
                if 'bottle' in transform:
                    f_p = self.tl.lookupTransform('head_rgbd_sensor_link',
                                                  transform, rospy.Time(0))
                    poses.append(f_p[0])
                    print('augmented pose', f_p)
                    print('ar_marker', trans)

            for pose in poses:
                if LA.norm(pose[2] - 0.1 - trans[2]) < 0.03:
                    return True
        except:
            rospy.logerr('AR MARKER NOT THERE')

        return False

    def check_success(self):

        self.com.clean_up()

        self.b_d = Bottle_Detect(self.policy.cam.read_info_data())

        img_rgb = self.policy.cam.read_color_data()
        img_depth = self.policy.cam.read_depth_data()

        s_obj, img_detect, poses = self.b_d.detect_bottle(img_rgb, img_depth)

        success = self.is_goal_object()

        self.b_d.clean_up()

        self.process_data(img_rgb, img_detect, poses, success)

        print("BOTTLE FOUND", success)

        return success

    def process_data(self, img_rgb, img_detect, object_poses, success):

        img_rgb_cr, img_d = self.com.format_data(img_rgb, None)

        state = {}
        state['color_img'] = img_rgb_cr
        state['found_object'] = img_detect
        state['object_poses'] = object_poses
        state['was_success'] = success

        self.trajectory.append(state)

    def save_data(self):

        self.com.save_evaluation(self.trajectory)

    def check_marker(self):

        try:
            A = self.tl.lookupTransform('head_l_stereo_camera_frame',
                                        'ar_marker/9', rospy.Time(0))
        except:
            rospy.logerr('trash not found')
            return False

        return True

    def execute_grasp(self):

        self.com.grip_open(self.gripper)
        self.whole_body.end_effector_frame = 'hand_palm_link'
        nothing = True

        try:
            self.whole_body.move_end_effector_pose(
                geometry.pose(y=0.15, z=-0.09, ek=-1.57), 'ar_marker/9')
        except:
            rospy.logerr('mustard bottle not found')

        self.com.grip_squeeze(self.gripper)

        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.9),
                                               'hand_palm_link')

        self.com.grip_open(self.gripper)
        self.com.grip_squeeze(self.gripper)
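
# is_goal_object() above applies LA.norm to a scalar difference, which is
# just the absolute value. The acceptance test it implements, written out
# plainly (offset and tolerance copied from the code above):
def matches_marker_depth(bottle_z, marker_z, offset=0.1, tol=0.03):
    # the bottle should sit ~10 cm in front of the AR marker, within 3 cm
    return abs(bottle_z - offset - marker_z) < tol
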
Example #7
class BottlePicker():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        self.RCNN = Depth_Object("bottle")
        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def move_to_top_side(self):
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'right_up')

    def bottle_pick(self):

        # self.rollout_data = []
        self.position_head()

        self.move_to_top_side()
        print("ARRIVED AT TOP SIDE")
        time.sleep(2)

        #cycle through positions for a long time (30)
        pose_num = 0
        pose_sequence = ['top_mid_far', 'top_left_far', 'top_mid']
        while pose_num < 30:
            pose_name = pose_sequence[pose_num % len(pose_sequence)]
            self.tt.move_to_pose(self.omni_base, pose_name)
            print("ARRIVED AT POSE " + pose_name)
            pose_num += 1

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:
                centers, out_img = self.RCNN.detect(c_img)

                # if self.get_new_grasp:
                #     c_m, dirs = run_connected_components(c_img)
                #     draw(c_img,c_m,dirs)

                #     c_img = self.cam.read_color_data()
                #     d_img = self.cam.read_depth_data()

                #     self.gripper.find_pick_region_cc(c_m[0],dirs[0],c_img,d_img,self.grasp_count)

                # pick_found,bed_pick = self.check_card_found()

                # self.gripper.execute_grasp(bed_pick,self.whole_body,'head_down')

                # self.grasp_count += 1
                # self.whole_body.move_to_go()
                # self.tt.move_to_pose(self.omni_base,'lower_start')
                # time.sleep(1)
                # self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.8})

                print("DETECTED: " + str(centers))
                cv2.imwrite("debug_imgs/debug" + str(pose_num) + ".png",
                            out_img)
            time.sleep(5)

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        try:

            for transform in transforms:
                print(transform)
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('got here')
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)

        except:
            rospy.logerr('bed pick not found yet')

        return True, cards

    def position_head(self):

        self.tt.move_to_pose(self.omni_base, 'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
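
# bottle_pick() above cycles through pose_sequence with a modulo index. The
# same schedule can be expressed with itertools; a small equivalent sketch:
from itertools import cycle, islice

def pose_schedule(pose_sequence, n_visits=30):
    # e.g. ['top_mid_far', 'top_left_far', 'top_mid', 'top_mid_far', ...]
    return list(islice(cycle(pose_sequence), n_visits))
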
Example #8
class BedMaker():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.ins = InitialSampler(self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.g_detector = Analytic_Grasp()

        self.sn = Success_Net(self.whole_body, self.tt, self.cam,
                              self.omni_base)

        c_img = self.cam.read_color_data()

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def bed_make(self):

        self.rollout_stats = []
        self.get_new_grasp = True

        if cfg.INS_SAMPLE:
            u_c, d_c = self.ins.sample_initial_state()

            self.rollout_stats.append([u_c, d_c])

        self.new_grasp = True
        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:

                if self.new_grasp:
                    self.position_head()
                else:
                    self.new_grasp = True
                time.sleep(3)

                c_img = self.cam.read_color_data()
                d_img = self.cam.read_depth_data()

                #CHANGE HERE
                grasp_factor = 3
                img_small = cv2.resize(np.copy(c_img), (640 // 3, 480 // 3))

                sgraspt = time.time()
                data = self.g_detector.get_grasp(img_small, grasp_factor)
                egraspt = time.time()
                print("Grasp predict time: " + str(egraspt - sgraspt))

                data = 3 * data
                IPython.embed()

                self.record_stats(c_img, d_img, data, self.side, 'grasp')

                self.gripper.find_pick_region_net(data, c_img, d_img,
                                                  self.grasp_count)

                pick_found, bed_pick = self.check_card_found()

                if self.side == "BOTTOM":
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                else:
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')

                self.check_success_state(c_img, d_img)

    def check_success_state(self, c_img, d_img):

        if self.side == "BOTTOM":
            success, data, c_img = self.sn.check_bottom_success(self.wl)
        else:
            success, data, c_img = self.sn.check_top_success(self.wl)

        self.record_stats(c_img, d_img, data, self.side, 'success')

        print "WAS SUCCESFUL: "
        print success
        if (success):

            if self.side == "BOTTOM":
                self.transition_to_top()
            else:
                self.transition_to_start()

            self.update_side()
        else:
            self.new_grasp = False

        self.grasp_count += 1

        if self.grasp_count > cfg.GRASP_OUT:
            self.transition_to_start()

    def update_side(self):

        if self.side == "BOTTOM":
            self.side = "TOP"

    def transition_to_top(self):
        if cfg.DEBUG_MODE:
            self.com.save_stat(self.rollout_stats)
            self.tt.move_to_pose(self.omni_base, 'lower_mid')
            sys.exit()
        else:
            self.move_to_top_side()

    def transition_to_start(self):
        self.com.save_stat(self.rollout_stats)
        self.move_to_start()
        sys.exit()

    def record_stats(self, c_img, d_img, data, side, typ):

        grasp_point = {}

        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img

        if typ == "grasp":
            grasp_point['net_pose'] = data
        else:
            grasp_point['net_trans'] = data

        grasp_point['side'] = side
        grasp_point['type'] = typ

        self.rollout_stats.append(grasp_point)

    def position_head(self):

        if self.side == "TOP":
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
        elif self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def move_to_top_side(self):

        self.tt.move_to_pose(self.omni_base, 'right_down')

        self.tt.move_to_pose(self.omni_base, 'right_up')

        self.tt.move_to_pose(self.omni_base, 'top_mid')

    def move_to_start(self):

        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_mid')
        else:

            self.tt.move_to_pose(self.omni_base, 'right_up')

            self.tt.move_to_pose(self.omni_base, 'right_down')
            self.tt.move_to_pose(self.omni_base, 'lower_mid')

    def check_bottom_success(self):

        self.tt.move_to_pose(self.omni_base, 'lower_mid')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def check_card_found(self):
        time.sleep(1)
        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        try:

            for transform in transforms:
                print(transform)
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('got here')
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)

        except:
            rospy.logerr('bed pick not found yet')

        return True, cards
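
# bed_make() above runs the analytic detector on a 3x-downscaled image and
# scales the predicted point back up by 3. A general sketch of that mapping
# (the detector's (x, y) return shape is an assumption for illustration):
import cv2
import numpy as np

def predict_on_downscaled(detector, c_img, factor=3):
    h, w = c_img.shape[:2]
    img_small = cv2.resize(np.copy(c_img), (w // factor, h // factor))
    x, y = detector.get_grasp(img_small, factor)
    return factor * x, factor * y
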
Example #9
class BedMaker():

    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        #self.test_current_point()
        #thread.start_new_thread(self.ql.run,())
        print("after thread")

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean


    def bed_make(self):

        self.rollout_data = []
        self.get_new_grasp = True

        self.position_head()
        while True:

            time.sleep(2)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:

                if self.get_new_grasp:
                    c_m, dirs = run_connected_components(c_img)
                    draw(c_img, c_m, dirs)

                    c_img = self.cam.read_color_data()
                    d_img = self.cam.read_depth_data()

                    self.gripper.find_pick_region_cc(c_m[0], dirs[0], c_img,
                                                     d_img, self.grasp_count)

                pick_found, bed_pick = self.check_card_found()

                self.gripper.execute_grasp(bed_pick, self.whole_body,
                                           'head_down')

                self.grasp_count += 1
                self.whole_body.move_to_go()
                self.tt.move_to_pose(self.omni_base, 'lower_start')
                time.sleep(1)
                self.whole_body.move_to_joint_positions(
                    {'head_tilt_joint': -0.8})

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        try:
            for transform in transforms:
                print(transform)
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('got here')
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)

        except:
            rospy.logerr('bed pick not found yet')

        return True, cards

    def position_head(self):

        self.tt.move_to_pose(self.omni_base, 'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
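
# run_connected_components() in bed_make() above is project code that is not
# shown. A minimal OpenCV sketch of the idea: threshold the image, label the
# connected regions, and return centroids of the larger ones as candidate
# grasp points (threshold and area cutoff are illustrative assumptions):
import cv2
import numpy as np

def candidate_centroids(c_img, thresh=127, min_area=100):
    gray = cv2.cvtColor(c_img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(
        binary, connectivity=8)
    # label 0 is the background; keep reasonably large components only
    return [tuple(centroids[i]) for i in range(1, n)
            if stats[i, cv2.CC_STAT_AREA] > min_area]
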
Example #10
class StarFinder(object):
    """docstring for StarFinder"""
    def __init__(self):
        super(StarFinder, self).__init__()
        self.odom = None
        self.colors = None
        self.route = None
        self.targ_node = 0
        self.at_wp = False
        self.map = None
        self.waypoint = None
        self.wp_pose = None
        self.curr_star = 0

        self.color_guess = 0
        self.pos_guessx = 0
        self.pos_guessy = 0

        self.grid_pos = None
        self.goal_pos = None

        self.tf = TransformListener()

    def run(self, rate, cmd_pub, get_wp):
        while not rospy.is_shutdown():

            if self.at_wp or test_mode:
                self.at_wp = not self.find_star()
            elif self.route is not None:
                self.update_pos()
                if self.route.is_at_node(self.grid_pos, self.targ_node):
                    if self.targ_node == len(self.route.nodes):
                        self.at_wp = True
                        continue
                    self.targ_node += 1
                self.move_to_next()

            rate.sleep()

    def odom_callback(self, odom):
        self.odom = odom

    def color_callback(self, colors):
        self.colors = colors

    def update_pos(self):
        print("Update Pos")
        if self.tf.frameExists("base_link"):
            # time = self.tf.getLatestCommonTime("/odometry/filtered", "/map")
            self.robo_stamped = to_stamped("base_link", self.odom.pose.pose)
            r_pos = self.odom.pose.pose.position
            self.grid_pos = (ANode(r_pos) / self.map.info.resolution).int()
            # self.grid_pos = self.tf.transformPose("base_link", self.robo_stamped).pose.position
            if self.wp_pose is not None:
                self.goal_pos = self.tf.transformPose(
                    "base_link", self.wp_pose_stamped).pose.position
            return True
        else:
            print("No tf for /map")
            print("List: {}".format(self.tf.getFrameStrings()))
            return False

    def map_callback(self, grid):
        print("Update Map")
        self.update_waypoint(self.curr_star == 0, self.color_guess,
                             self.pos_guessx, self.pos_guessy)

        self.map = grid
        if not self.update_pos() or test_mode:
            return
        self.route = Route(grid, self.grid_pos, self.goal_pos)
        self.targ_node = 0

    def update_waypoint(self, first, color=None, posx=None, posy=None):
        print("Update Waypoint")
        if first:
            self.waypoint = get_wp(firstwp=True)
        else:
            self.waypoint = get_wp(firstwp=False,
                                   symboldetected=True,
                                   symbolcolor=color,
                                   symbolpositionx=posx,
                                   symbolpositiony=posy)

        self.wp_pose = Pose(
            Point(self.waypoint.waypointx, self.waypoint.waypointy, 0),
            Quaternion(0, 1, 0, 0))
        self.wp_pose_stamped = to_stamped("base_link", self.wp_pose)
        return self.waypoint.success

    def move_to_next(self):
        print("Move To Next Node")
        target = self.route[self.targ_node]
        pos = self.odom.pose.pose.position
        target = target * self.map.info.resolution

        print("    Target: {}, Pos: [{}, {}, {}]".format(
            target, pos.x, pos.y, pos.z))

        dvec = Vector3(target.x - pos.x, target.y - pos.y, 0)
        print("    Vel: {}".format(ANode(dvec)))

        ori = self.odom.pose.pose.orientation.w
        ang = parallelize(ori, ANode(dvec))
        lin = Vector3(1, 0, 0)
        vel = Twist(lin, ang)

        cmd_pub.publish(vel)

    def find_star(self):
        print("Find Star")
        if self.colors is None:
            return False
        colors = color_perception.camera_callback(self.colors)
        nearest = None
        nc = None
        for c in colors:
            pos = colors[c]
            pos = ANode(pos[0], pos[1], 0)
            # pos = to_stamped("camera_link", Pose(pos, Quaternion(0, 1, 0, 0)))
            # pos = ANode(self.tf.transformPose("base_link", pos).pose.position)
            if test_mode or nearest is None or ANode(
                    self.odom.pose.pose.position).dist(pos) < ANode(
                        self.odom.pose.pose.position).dist(nearest):
                nearest = pos
                nc = c
        if nearest is None:
            print("No star found.")
            return False
        self.pos_guessx = nearest.x
        self.pos_guessy = nearest.y
        self.color_guess = nc
        print("Star Guess: {} at [{}, {}]".format(self.color_guess,
                                                  self.pos_guessx,
                                                  self.pos_guessy))
        if self.update_waypoint(False, self.color_guess, self.pos_guessx,
                                self.pos_guessy):
            return True
        else:
            lin = Vector3(0, 0, 0)
            ang = Vector3(0, 12, 0)
            vel = Twist(lin, ang)
            cmd_pub.publish(vel)
            return False
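
# StarFinder converts between world coordinates and occupancy-grid cells by
# dividing or multiplying with map.info.resolution (the ANode arithmetic is
# project code). A plain sketch of both conversions; handling map.info.origin
# is an assumption here, since the class above ignores it:
def world_to_grid(x, y, grid_map):
    res = grid_map.info.resolution
    ox = grid_map.info.origin.position.x
    oy = grid_map.info.origin.position.y
    return int((x - ox) / res), int((y - oy) / res)

def grid_to_world(gx, gy, grid_map):
    res = grid_map.info.resolution
    ox = grid_map.info.origin.position.x
    oy = grid_map.info.origin.position.y
    return gx * res + ox, gy * res + oy
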
Example #11
class CardPicker():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.detector = Detector()

        self.joystick = JoyStick_X(self.com)

        self.suction = Suction(self.gp, self.cam, self.com.Options)

        #self.suction.stop()
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def find_card(self):

        self.whole_body.move_to_neutral()
        self.whole_body.move_to_joint_positions({
            'arm_flex_joint': -1.57,
            'head_tilt_joint': -.785
        })
        print(self.check_card_found()[0])
        i = 0
        while True:
            print("panning", i)
            self.whole_body.move_to_joint_positions({'head_pan_joint': .30})
            i += 1
            if self.check_card_found()[0]:
                break
        print("found card!")
        self.card_pick()

    def card_pick(self):

        while True:

            c_img = self.cam.read_color_data()
            c_img_c = np.copy(c_img)
            cv2.imshow('debug_true', c_img_c)
            cv2.waitKey(30)
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:
                c_img_cropped, d_img = self.com.format_data(
                    np.copy(c_img), d_img)

                data = self.detector.numpy_detector(np.copy(c_img_cropped))

                cur_recording = self.joystick.get_record_actions_passive()
                self.suction.find_pick_region_net(data, c_img, d_img, c_img_c)
                if (cur_recording[0] < -0.1):

                    card_found, cards = self.check_card_found()

                    if (card_found):
                        self.suction.execute_grasp(cards, self.whole_body)

                        self.com.go_to_initial_state(self.whole_body)

    def check_card_found(self):

        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            if 'card' in transform:
                # The lookup confirms the transform is actually available.
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

                return True, cards
        return False, []
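The check above illustrates a pattern that recurs in these examples: scan every
frame the TF listener knows about, match on a substring, and look up the pose
of each match. A sketch of that pattern as a reusable helper, assuming the
rospy and tf modules these examples already import (the helper name and the
skip-on-exception policy are our assumptions, not from the source):

def find_frames_containing(listener, key, target_frame):
    '''Return (frame, (trans, rot)) pairs for every TF frame whose name
    contains `key`, skipping frames that cannot be looked up yet.'''
    found = []
    for frame in listener.getFrameStrings():
        if key in frame:
            try:
                pose = listener.lookupTransform(target_frame, frame,
                                                rospy.Time(0))
                found.append((frame, pose))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                continue
    return found

# e.g. inside CardPicker:
#   cards = find_frames_containing(self.tl, 'card',
#                                  'head_rgbd_sensor_rgb_frame')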
Example #12
class CardPicker():
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : an instantiated YuMi robot
        com : the common class for the robot
        cam : an open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.suction = Suction(self.gp, self.cam)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of the depth image.
        '''
        indx = np.nonzero(d_img)
        mean = np.mean(d_img[indx])
        return mean

    def card_pick(self):

        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:

                self.ql = QueryLabeler()
                self.ql.run(c_img)
                data = self.ql.label_data
                del self.ql

                self.suction.find_pick_region(data, c_img, d_img)

                # card_found,cards = self.check_card_found()

                # if(card_found):
                #     self.suction.execute_grasp(cards,self.whole_body)

                # self.com.go_to_initial_state(self.whole_body)

    def check_card_found(self):

        transforms = self.tl.getFrameStrings()

        cards = []

        for transform in transforms:
            if 'card' in transform:
                # The lookup confirms the transform is actually available.
                f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                              transform, rospy.Time(0))
                cards.append(transform)

        return len(cards) > 0, cards
Example #13
class BedMaker():
    def __init__(self, args):
        """For data collection of bed-making, NOT the deployment.

        Assumes we roll out the robot's policy via code (not via human touch).
        This is the 'slower' way where we have the python interface that the
        human clicks on to indicate grasping points. Good news is, our deployment
        code is probably going to be similar to this.

        For joystick: you only need it plugged in for the initial state sampler,
        which (at the moment) we are not even using.
        """
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want).
        self.view_mode = cfg.VIEW_MODE

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0

        # Bells and whistles; note the 'success check' to check if transitioning
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        time.sleep(4)
        print(
            "Finished creating BedMaker()! Get the bed set up and run bed-making!"
        )
        if cfg.INS_SAMPLE:
            print("TODO: we don't have sampling code here.")

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

    def bed_make(self):
        """Runs the pipeline for data collection.

        You can run this for multiple bed-making trajectories.
        For now, though, assume one call to this means one trajectory.
        """
        self.rollout_data = []
        self.get_new_grasp = True

        # I think, creates red line in GUI where we adjust the bed to match it.
        # But in general we better fix our sampler before doing this for real.
        # Don't forget to press 'B' on the joystick to get past this screen.
        if cfg.INS_SAMPLE:
            u_c, d_c = self.ins.sample_initial_state()
            self.rollout_data.append([u_c, d_c])

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:
                if self.get_new_grasp:
                    self.position_head()

                    # Human supervisor labels. data = dictionary of relevant info
                    data = self.wl.label_image(c_img)
                    c_img = self.cam.read_color_data()
                    d_img = self.cam.read_depth_data()
                    self.add_data_point(c_img, d_img, data, self.side, 'grasp')

                    # Broadcasts grasp pose
                    self.gripper.find_pick_region_labeler(
                        data, c_img, d_img, self.grasp_count)

                # Execute the grasp and check for success. But if VIEW_MODE is
                # close, better to reset to a 'nicer' position for base movement.
                pick_found, bed_pick = self.check_card_found()
                if self.side == "BOTTOM":
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'lower_start')
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                else:
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'top_mid')
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')
                self.check_success_state()

    def check_success_state(self):
        """
        Checks whether a single grasp in a bed-making trajectory succeeded.
        Depends on which side of the bed the HSR is at. Invokes human supervisor
        and transitions the HSR if successful.
        """
        if self.side == "BOTTOM":
            success, data = self.sc.check_bottom_success(self.wl)
        else:
            success, data = self.sc.check_top_success(self.wl)
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()
        self.add_data_point(c_img, d_img, data, self.side, 'success')
        print("WAS SUCCESFUL: {}".format(success))

        # Handle transitioning to different side
        if success:
            if self.side == "BOTTOM":
                self.transition_to_top()
            else:
                self.transition_to_start()
            self.update_side()
            self.grasp_count += 1
            self.get_new_grasp = True
        else:
            self.grasp_count += 1
            # If grasp failure, invokes finding region again and add new data
            self.gripper.find_pick_region_labeler(data, c_img, d_img,
                                                  self.grasp_count)
            self.add_data_point(c_img, d_img, data, self.side, 'grasp')
            self.get_new_grasp = False

    def update_side(self):
        """TODO: extend to multiple side switches?"""
        if self.side == "BOTTOM":
            self.side = "TOP"

    def transition_to_top(self):
        """Transition to top (not bottom)."""
        self.move_to_top_side()

    def transition_to_start(self):
        """Transition to start=bottom, save rollout data, exit program.
        Saves to a supervisor's directory since we're using a supervisor.
        """
        self.com.save_rollout(self.rollout_data)
        self.move_to_start()
        sys.exit()

    def add_data_point(self, c_img, d_img, data, side, typ, pose=None):
        """Adds a dictionary to the `rollout_data` list."""
        grasp_point = {}
        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img
        if pose is None:
            label = data['objects'][0]['box']
            pose = [(label[2] - label[0]) / 2.0 + label[0],
                    (label[3] - label[1]) / 2.0 + label[1]]
        grasp_point['pose'] = pose
        grasp_point['class'] = data['objects'][0]['class']
        grasp_point['side'] = side
        grasp_point['type'] = typ
        self.rollout_data.append(grasp_point)
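    # Note: when `pose` is not given it defaults to the center of the labeled
    # bounding box [x1, y1, x2, y2], i.e. [(x2 - x1)/2 + x1, (y2 - y1)/2 + y1].
    # For example (hypothetical values), box [10, 20, 30, 60] -> [20.0, 40.0].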

    def position_head(self):
        """Position the head for a grasp attempt.
        After playing around a bit, I think `head_tilt_joint` should be set last.
        """
        self.whole_body.move_to_go()
        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        self.whole_body.move_to_joint_positions(
            {'arm_flex_joint': -np.pi / 16.0})
        self.whole_body.move_to_joint_positions(
            {'head_pan_joint': np.pi / 2.0})
        self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120})
        self.whole_body.move_to_joint_positions(
            {'head_tilt_joint': -np.pi / 4.0})

    def move_to_top_side(self):
        """Assumes we're at the bottom and want to go to the top."""
        self.whole_body.move_to_go()
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'right_mid')
        self.tt.move_to_pose(self.omni_base, 'right_up')
        self.tt.move_to_pose(self.omni_base, 'top_mid_tmp')

    def move_to_start(self):
        """Assumes we're at the top and we go back to the start."""
        self.whole_body.move_to_go()
        self.tt.move_to_pose(self.omni_base, 'right_up')
        self.tt.move_to_pose(self.omni_base, 'right_mid')
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'lower_mid')

    def check_card_found(self):
        """Looks up the pose for where the HSR's hand should go to."""
        transforms = self.tl.getFrameStrings()
        cards = []
        current_grasp = 'bed_' + str(self.grasp_count)
        try:
            for transform in transforms:
                if current_grasp in transform:
                    print('found {}'.format(current_grasp))
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rospy.logerr('bed pick not found yet')
        return len(cards) > 0, cards
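check_card_found only sees frames that have already reached the TF buffer, so
a slow grasp-pose broadcaster yields an empty list. An alternative sketch that
blocks until the frame is available, using tf's waitForTransform (the exact
frame name 'bed_<n>' and the timeout value are our assumptions):

def wait_for_grasp_frame(listener, grasp_count, timeout=2.0):
    '''Block until the bed_<n> frame is broadcast, then return its pose in the
    map frame, or None if it does not appear within `timeout` seconds.'''
    frame = 'bed_' + str(grasp_count)
    try:
        listener.waitForTransform('map', frame, rospy.Time(0),
                                  rospy.Duration(timeout))
        return listener.lookupTransform('map', frame, rospy.Time(0))
    except tf.Exception:
        rospy.logerr('bed pick not found within {}s'.format(timeout))
        return None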
Example #14
class BedMaker():
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : an instantiated YuMi robot
        com : the common class for the robot
        cam : an open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ins = InitialSampler(self.cam)

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        self.ss = Self_Supervised(self.cam)

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero pixels of the depth image.
        '''
        indx = np.nonzero(d_img)
        mean = np.mean(d_img[indx])
        return mean

    def bed_make(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if cfg.INS_SAMPLE:
            u_c, d_c = self.ins.sample_initial_state()

            self.rollout_data.append([u_c, d_c])

        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:

                if self.get_new_grasp:
                    self.position_head()
                    data = self.wl.label_image(c_img)

                    c_img = self.cam.read_color_data()
                    d_img = self.cam.read_depth_data()

                    self.add_data_point(c_img, d_img, data, self.side, 'grasp')

                    self.gripper.find_pick_region_labeler(
                        data, c_img, d_img, self.grasp_count)

                    if cfg.SS_LEARN:
                        grasp_points = self.ss.learn(self.whole_body,
                                                     self.grasp_count)
                        self.add_ss_data(grasp_points, data, self.side,
                                         'grasp')

                pick_found, bed_pick = self.check_card_found()

                if self.side == "BOTTOM":
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                else:
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')

                self.check_success_state()

    def check_success_state(self):

        if self.side == "BOTTOM":
            success, data = self.sc.check_bottom_success(self.wl)
        else:
            success, data = self.sc.check_top_success(self.wl)

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        self.add_data_point(c_img, d_img, data, self.side, 'success')

        print "WAS SUCCESFUL: "
        print success
        if (success):

            if cfg.SS_LEARN:
                grasp_points = self.ss.learn(self.whole_body, self.grasp_count)
                self.add_ss_data(grasp_points, data, self.side, 'success')

            if self.side == "BOTTOM":
                self.transition_to_top()
            else:
                self.transition_to_start()

            self.update_side()
            self.grasp_count += 1
            self.get_new_grasp = True

        else:
            self.grasp_count += 1
            self.gripper.find_pick_region_labeler(data, c_img, d_img,
                                                  self.grasp_count)
            self.add_data_point(c_img, d_img, data, self.side, 'grasp')

            self.get_new_grasp = False

            if cfg.SS_LEARN:
                grasp_points = self.ss.learn(self.whole_body, self.grasp_count)
                self.add_ss_data(grasp_points, data, self.side, 'success')

    def update_side(self):

        if self.side == "BOTTOM":
            self.side = "TOP"

    def transition_to_top(self):
        if cfg.DEBUG_MODE:
            self.com.save_rollout(self.rollout_data)
            self.tt.move_to_pose(self.omni_base, 'lower_mid')
            sys.exit()
        else:
            self.move_to_top_side()

    def transition_to_start(self):
        self.com.save_rollout(self.rollout_data)
        self.move_to_start()
        sys.exit()

    def add_data_point(self, c_img, d_img, data, side, typ, pose=None):

        grasp_point = {}

        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img

        if pose is None:
            label = data['objects'][0]['box']
            pose = [(label[2] - label[0]) / 2.0 + label[0],
                    (label[3] - label[1]) / 2.0 + label[1]]

        grasp_point['pose'] = pose

        grasp_point['class'] = data['objects'][0]['class']
        grasp_point['side'] = side
        grasp_point['type'] = typ

        self.rollout_data.append(grasp_point)

    def position_head(self):

        if self.side == "TOP":
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
        elif self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def add_ss_data(self, g_points, data, side, typ):

        for g_point in g_points:

            self.add_data_point(g_point['c_img'],
                                g_point['d_img'],
                                data,
                                side,
                                typ,
                                pose=g_point['pose'])

    def move_to_top_side(self):

        self.tt.move_to_pose(self.omni_base, 'right_down')
        #self.tt.move_to_pose(self.omni_base,'right_mid')

        self.tt.move_to_pose(self.omni_base, 'right_up')

        self.tt.move_to_pose(self.omni_base, 'top_mid')

    def move_to_start(self):

        self.tt.move_to_pose(self.omni_base, 'right_up')
        #self.tt.move_to_pose(self.omni_base,'right_mid')
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'lower_mid')

    def check_bottom_success(self):

        self.tt.move_to_pose(self.omni_base, 'lower_mid')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def check_card_found(self):

        transforms = self.tl.getFrameStrings()

        cards = []
        current_grasp = 'bed_' + str(self.grasp_count)

        try:
            for transform in transforms:
                if current_grasp in transform:
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rospy.logerr('bed pick not found yet')

        return len(cards) > 0, cards
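The source does not show an entry point for this example; a minimal sketch of
how the class is presumably driven (the main guard is our assumption):

if __name__ == "__main__":
    bm = BedMaker()
    bm.bed_make()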