Example #1
class CollectData():
    def __init__(self):
        """
        Class to run HSR lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"

    def collect_data(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving
        i = 0
        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/data_chris/test" + str(i) + ".png", c_img)
            #can also save d_img
            IPython.embed()  #re-arrange setup here
            i += 1
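A minimal entry point for this class might look like the sketch below; the `__main__` guard is illustrative and not part of the original example, which only shows the class itself.

if __name__ == '__main__':
    # Hypothetical usage: construct the task object and start the capture loop.
    task = CollectData()
    task.collect_data()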
Example #2
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        #PARAMETERS TO CHANGE
        self.side = 'TOP'

        self.r_count = 0

        self.grasp_count = 0
        self.success_count = 0

        self.true_count = 0
        self.grasp = True

        self.r_count = self.get_rollout_number()

        self.cam = RGBD()
        self.com = COM()

        self.joystick = JoyStick_X(self.com)

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.position_head()

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        #self.test_current_point()
        time.sleep(4)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"
Example #4
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.ins = InitialSampler(self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.g_detector = Analytic_Grasp()

        self.sn = Success_Net(self.whole_body, self.tt, self.cam,
                              self.omni_base)

        c_img = self.cam.read_color_data()

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #5
    def __init__(self,graspPlanner,cam,options,gripper):
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'
        not_read = True
        while not_read:
            try:
                cam_info = cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except:
                rospy.logerr('info not received')
        # Bells and whistles
        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        self.gp = graspPlanner
        self.gripper = gripper
        self.options = options
        self.com = COM()

        # See paper for details, used to release grasp if too much force.
        self.tension = Tensioner()

        # Side, need to change this.
        self.side = 'BOTTOM'
        self.offset_x = 0.0 # positive means going to other side of bed (both sides!)
        self.offset_z = 0.0 # negative means going UP to ceiling
        self.apply_offset = False
    def __init__(self, args):
        """For data collection of bed-making, NOT the deployment.

        Assumes we roll out the robot's policy via code (not via human touch).
        This is the 'slower' way where we have the python interface that the
        human clicks on to indicate grasping points. Good news is, our deployment
        code is probably going to be similar to this.

        For joystick: you only need it plugged in for the initial state sampler,
        which (at the moment) we are not even using.
        """
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want).
        self.view_mode = cfg.VIEW_MODE

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0

        # Bells and whistles; note the 'success check' to check if transitioning
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        time.sleep(4)
        print(
            "Finished creating BedMaker()! Get the bed set up and run bed-making!"
        )
        if cfg.INS_SAMPLE:
            print("TODO: we don't have sampling code here.")

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()
Example #7
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        self.RCNN = Depth_Object("bottle")
        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.wl = Python_Labeler(cam=self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ds = data_saver('tpc_rollouts/rollouts')

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body,
                                   self.omni_base, self.tt)

        print "after thread"
Example #9
    def __init__(self):
        """
        Class to run HSR lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"
Example #10
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"
Example #11
    def __init__(self, graspPlanner, cam, options, gripper):
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'

        not_read = True
        while not_read:

            try:
                cam_info = cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except:
                rospy.logerr('info not received')

        self.pcm = PCM()

        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        self.gp = graspPlanner
        self.gripper = gripper
        self.options = options
        self.com = COM()

        self.tension = Tensioner()
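The loop above polls `cam.read_info_data()` as fast as possible until a CameraInfo message arrives. A gentler variant, shown here as a sketch that assumes the same `read_info_data()` API and standard `rospy` calls, sleeps between attempts and exits cleanly on shutdown:

import rospy

def wait_for_cam_info(cam, poll_s=0.1):
    # Sketch: block until the RGBD camera publishes CameraInfo.
    cam_info = None
    while cam_info is None and not rospy.is_shutdown():
        try:
            cam_info = cam.read_info_data()
        except Exception:
            rospy.logerr('info not received')
        if cam_info is None:
            rospy.sleep(poll_s)
    return cam_info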
Example #12
class BedMaker():
    def __init__(self, args):
        """For deploying the bed-making policy, not for data collection.

        We use all three variants (analytic, human, networks) here due to
        similarities in code structure.
        """
        self.args = args
        DEBUG = True

        # Set up the robot.
        self.robot = robot = hsrb_interface.Robot()
        if DEBUG:
            print("finished: hsrb_interface.Robot()...")
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        if DEBUG:
            print("finished: robot.get(omni_base)...")
        self.whole_body = self.robot.get('whole_body')
        if DEBUG:
            print("finished: robot.get(whole_body)...")
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # Set up initial state, table, etc. Don't forget view mode!
        self.view_mode = BED_CFG.VIEW_MODE
        self.com.go_to_initial_state(self.whole_body)
        if DEBUG:
            print("finished: go_to_initial_state() ...")
        self.tt = TableTop()
        if DEBUG:
            print("finished: TableTop()...")

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0
        self.b_grasp_count = 0
        self.t_grasp_count = 0

        # AH, build the YOLO network beforehand.
        g_cfg = BED_CFG.GRASP_CONFIG
        s_cfg = BED_CFG.SUCC_CONFIG
        self.yc = YOLO_CONV(options=s_cfg)
        self.yc.load_network()

        # Policy for grasp detection, using Deep Imitation Learning.
        # Or, actually, sometimes we will use humans or an analytic version.
        if DEBUG:
            self._test_variables()
        print("\nnow forming the GDetector with type {}".format(args.g_type))
        if args.g_type == 'network':
            self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc)
        elif args.g_type == 'analytic':
            self.g_detector = Analytic_Grasp()  # TODO not implemented!
        elif args.g_type == 'human':
            print("Using a human, don't need to have a `g_detector`. :-)")

        if DEBUG:
            self._test_variables()
            print("\nnow making success net")
        self.sn = Success_Net(self.whole_body,
                              self.tt,
                              self.cam,
                              self.omni_base,
                              fg_cfg=s_cfg,
                              bed_cfg=BED_CFG,
                              yc=self.yc)

        # Bells and whistles.
        self.br = TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.dp = DrawPrediction()

        # When we start, do rospy.spin() to check the frames (phase 1). Then re-run.
        # The current hack we have to get around crummy AR marker detection. :-(
        if DEBUG:
            self._test_variables()
        print("Finished with init method")
        time.sleep(4)
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

        # For evaluating coverage.
        self.image_start = None
        self.image_final = None
        self.image_start2 = None
        self.image_final2 = None

        # For grasp offsets.
        self.apply_offset = False

    def bed_make(self):
        """Runs the pipeline for deployment, testing out bed-making.
        """
        # Get the starting image (from USB webcam). Try a second as well.
        cap = cv2.VideoCapture(0)
        frame = None
        while frame is None:
            ret, frame = cap.read()
            cv2.waitKey(50)
        self.image_start = frame
        cv2.imwrite('image_start.png', self.image_start)

        _, frame = cap.read()
        self.image_start2 = frame
        cv2.imwrite('image_start2.png', self.image_start2)

        cap.release()
        print(
            "NOTE! Recorded `image_start` for coverage evaluation. Was it set up?"
        )

        def get_pose(data_all):
            # See `find_pick_region_labeler` in `p_pi/bed_making/gripper.py`.
            # It's because from the web labeler, we get a bunch of objects.
            # So we have to compute the pose (x,y) from it.
            res = data_all['objects'][0]
            x_min = float(res['box'][0])
            y_min = float(res['box'][1])
            x_max = float(res['box'][2])
            y_max = float(res['box'][3])
            x = (x_max - x_min) / 2.0 + x_min
            y = (y_max - y_min) / 2.0 + y_min
            return (x, y)

        args = self.args
        use_d = BED_CFG.GRASP_CONFIG.USE_DEPTH
        self.get_new_grasp = True
        self.new_grasp = True
        self.rollout_stats = []  # What we actually save for analysis later

        # Add to self.rollout_stats at the end for more timing info
        self.g_time_stats = []  # for _execution_ of a grasp
        self.move_time_stats = []  # for moving to the other side

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:
                if self.new_grasp:
                    self.position_head()
                else:
                    self.new_grasp = True
                time.sleep(3)

                c_img = self.cam.read_color_data()
                d_img = self.cam.read_depth_data()
                d_img_raw = np.copy(d_img)  # Needed for determining grasp pose

                # --------------------------------------------------------------
                # Process depth images! Helps network, human, and (presumably) analytic.
                # Obviously human can see the c_img as well ... hard to compare fairly.
                # --------------------------------------------------------------
                if use_d:
                    if np.isnan(np.sum(d_img)):
                        cv2.patchNaNs(d_img, 0.0)
                    d_img = depth_to_net_dim(d_img, robot='HSR')
                    policy_input = np.copy(d_img)
                else:
                    policy_input = np.copy(c_img)

                # --------------------------------------------------------------
                # Run grasp detector to get data=(x,y) point for target, record stats.
                # Note that the web labeler returns a dictionary like this:
                # {'objects': [{'box': (155, 187, 165, 194), 'class': 0}], 'num_labels': 1}
                # but we really want just the 2D grasping point. So use `get_pose()`.
                # Also, for the analytic one, we'll pick the highest point ourselves.
                # --------------------------------------------------------------
                sgraspt = time.time()
                if args.g_type == 'network':
                    data = self.g_detector.predict(policy_input)
                elif args.g_type == 'analytic':
                    data_all = self.wl.label_image(policy_input)
                    data = get_pose(data_all)
                elif args.g_type == 'human':
                    data_all = self.wl.label_image(policy_input)
                    data = get_pose(data_all)
                egraspt = time.time()

                g_predict_t = egraspt - sgraspt
                print("Grasp predict time: {:.2f}".format(g_predict_t))
                self.record_stats(c_img, d_img_raw, data, self.side,
                                  g_predict_t, 'grasp')

                # For safety, we can check image and abort as needed before execution.
                if use_d:
                    img = self.dp.draw_prediction(d_img, data)
                else:
                    img = self.dp.draw_prediction(c_img, data)
                caption = 'G Predicted: {} (ESC to abort, other key to proceed)'.format(
                    data)
                call_wait_key(cv2.imshow(caption, img))

                # --------------------------------------------------------------
                # Broadcast grasp pose, execute the grasp, check for success.
                # We'll use the `find_pick_region_net` since the `data` is the
                # (x,y) pose, and not `find_pick_region_labeler`.
                # --------------------------------------------------------------
                self.gripper.find_pick_region_net(
                    pose=data,
                    c_img=c_img,
                    d_img=d_img_raw,
                    count=self.grasp_count,
                    side=self.side,
                    apply_offset=self.apply_offset)
                pick_found, bed_pick = self.check_card_found()

                if self.side == "BOTTOM":
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'lower_start')
                    tic = time.time()
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                    toc = time.time()
                else:
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'top_mid')
                    tic = time.time()
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')
                    toc = time.time()
                self.g_time_stats.append(toc - tic)
                self.check_success_state(policy_input)

    def check_success_state(self, old_grasp_image):
        """
        Checks whether a single grasp in a bed-making trajectory succeeded.
        Depends on which side of the bed the HSR is at. Invokes the learned
        success network policy and transitions the HSR if successful.

        When we record the data, c_img and d_img should be what success net saw.

        UPDATE: now we can pass in the previous `d_img` from the grasping to
        compare the difference. Well, technically the `policy_input` so it can
        handle either case.
        """
        use_d = BED_CFG.SUCC_CONFIG.USE_DEPTH
        if self.side == "BOTTOM":
            result = self.sn.check_bottom_success(use_d, old_grasp_image)
            self.b_grasp_count += 1
        else:
            result = self.sn.check_top_success(use_d, old_grasp_image)
            self.t_grasp_count += 1
        self.grasp_count += 1
        assert self.grasp_count == self.b_grasp_count + self.t_grasp_count

        success = result['success']
        data = result['data']
        c_img = result['c_img']
        d_img = result['d_img']
        d_img_raw = result['d_img_raw']
        s_predict_t = result['s_predict_t']
        img_diff = result['diff_l2']
        img_ssim = result['diff_ssim']
        self.record_stats(c_img, d_img_raw, data, self.side, s_predict_t,
                          'success')

        # We really need a better metric, such as 'structural similarity'.
        # Edit: well, it's probably marginally better, I think.
        # I use an L2 threshold of 98k, and an SSIM threshold of 0.88.

        if BED_CFG.GRASP_CONFIG.USE_DEPTH != BED_CFG.SUCC_CONFIG.USE_DEPTH:
            print("grasp vs success for using depth differ")
            print("for now we'll ignore the offset issue.")
        else:
            print("L2 and SSIM btwn grasp & next image: {:.1f} and {:.3f}".
                  format(img_diff, img_ssim))
            if img_ssim >= 0.875 or img_diff < 85000:
                print("APPLYING OFFSET! (self.apply_offset = True)")
                self.apply_offset = True
            else:
                print("no offset applied (self.apply_offset = False)")
                self.apply_offset = False

        # Have user confirm that this makes sense.
        caption = "Success net saw this and thought: {}. Press any key".format(
            success)
        if use_d:
            call_wait_key(cv2.imshow(caption, d_img))
        else:
            call_wait_key(cv2.imshow(caption, c_img))

        # Limit amount of grasp attempts per side, pretend 'success' anyway.
        lim = BED_CFG.GRASP_ATTEMPTS_PER_SIDE
        if (self.side == 'BOTTOM' and self.b_grasp_count >= lim) or \
                (self.side == 'TOP' and self.t_grasp_count >= lim):
            print("We've hit {} for this side so set success=True".format(lim))
            success = True

        # Handle transitioning to different side
        if success:
            if self.side == "BOTTOM":
                self.transition_to_top()
                self.side = 'TOP'
            else:
                self.transition_to_start()
            print(
                "We're moving to another side so revert self.apply_offset = False."
            )
            self.apply_offset = False
        else:
            self.new_grasp = False

    def transition_to_top(self):
        """Transition to top (not bottom)."""
        transition_time = self.move_to_top_side()
        self.move_time_stats.append(transition_time)

    def transition_to_start(self):
        """Transition to start=bottom, SAVE ROLLOUT STATS, exit program.

        The `rollout_stats` is a list with a bunch of stats recorded via the
        class method `record_stats`. We save with a top-down webcam and save
        before moving back, since the HSR could disconnect.
        """
        # Record the final image for evaluation later (from USB webcam).
        cap = cv2.VideoCapture(0)
        frame = None
        while frame is None:
            ret, frame = cap.read()
        self.image_final = frame
        cv2.imwrite('image_final.png', self.image_final)

        _, frame = cap.read()
        self.image_final2 = frame
        cv2.imwrite('image_final2.png', self.image_final2)

        cap.release()
        print("NOTE! Recorded `image_final` for coverage evaluation.")

        # Append some last-minute stuff to `self.rollout_stats` for saving.
        final_stuff = {
            'image_start': self.image_start,
            'image_final': self.image_final,
            'image_start2': self.image_start2,
            'image_final2': self.image_final2,
            'grasp_times': self.g_time_stats,
            'move_times': self.move_time_stats,
            'args': self.args,  # ADDING THIS! Now we can 'retrace' our steps.
        }
        self.rollout_stats.append(final_stuff)

        # SAVE, move to start, then exit.
        self.com.save_stat(self.rollout_stats, target_path=self.args.save_path)
        self.move_to_start()
        sys.exit()

    def record_stats(self, c_img, d_img, data, side, time, typ):
        """Adds a dictionary to the `rollout_stats` list.

        We can tell it's a 'net' thing due to 'net_pose' and 'net_succ' keys.
        EDIT: argh wish I hadn't done that since this script also handles the
        human and analytic cases. Oh well, too late for that now.
        """
        assert side in ['BOTTOM', 'TOP']
        grasp_point = {}
        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img
        if typ == "grasp":
            grasp_point['net_pose'] = data
            grasp_point['g_net_time'] = time
        elif typ == "success":
            grasp_point['net_succ'] = data
            grasp_point['s_net_time'] = time
        else:
            raise ValueError(typ)
        grasp_point['side'] = side
        grasp_point['type'] = typ
        self.rollout_stats.append(grasp_point)

    def position_head(self):
        """Position head for a grasp.

        Use lower_start_tmp so HSR looks 'sideways'; thus, hand is not in the way.
        """
        self.whole_body.move_to_go()
        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        self.whole_body.move_to_joint_positions(
            {'arm_flex_joint': -np.pi / 16.0})
        self.whole_body.move_to_joint_positions(
            {'head_pan_joint': np.pi / 2.0})
        self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120})
        self.whole_body.move_to_joint_positions(
            {'head_tilt_joint': -np.pi / 4.0})

    def move_to_top_side(self):
        """Assumes we're at the bottom and want to go to the top."""
        self.whole_body.move_to_go()
        tic = time.time()
        self.tt.move_to_pose(self.omni_base, 'right_down_1')
        self.tt.move_to_pose(self.omni_base, 'right_mid_1')
        self.tt.move_to_pose(self.omni_base, 'right_up_1')
        self.tt.move_to_pose(self.omni_base, 'top_mid_tmp')
        toc = time.time()
        return toc - tic

    def move_to_start(self):
        """Assumes we're at the top and we go back to the start.

        Go to lower_start_tmp to be at the same view as we started with, so that
        we take a c_img and compare coverage.
        """
        self.whole_body.move_to_go()
        tic = time.time()
        self.tt.move_to_pose(self.omni_base, 'right_up_2')
        self.tt.move_to_pose(self.omni_base, 'right_mid_2')
        self.tt.move_to_pose(self.omni_base, 'right_down_2')
        self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        toc = time.time()
        return toc - tic

    def check_card_found(self):
        """Looks up the pose for where the HSR's hand should go to."""
        transforms = self.tl.getFrameStrings()
        cards = []
        try:
            for transform in transforms:
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('found {}'.format(current_grasp))
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)
        except:
            rospy.logerr('bed pick not found yet')
        return True, cards

    def _test_grasp(self):
        """Simple tests for grasping. Don't forget to process depth images.

        Do this independently of any rollout ...
        """
        print("\nNow in `test_grasp` to check grasping net...")
        self.position_head()
        time.sleep(3)

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()
        if np.isnan(np.sum(d_img)):
            cv2.patchNaNs(d_img, 0.0)
        d_img = depth_to_net_dim(d_img, robot='HSR')
        pred = self.g_detector.predict(np.copy(d_img))
        img = self.dp.draw_prediction(d_img, pred)

        print("prediction: {}".format(pred))
        caption = 'G Predicted: {} (ESC to abort, other key to proceed)'.format(
            pred)
        cv2.imshow(caption, img)
        key = cv2.waitKey(0)
        if key in ESC_KEYS:
            print("Pressed ESC key. Terminating program...")
            sys.exit()

    def _test_success(self):
        """Simple tests for success net. Don't forget to process depth images.

        Should be done after a grasp test since I don't re-position...  Note: we
        have access to `self.sn` but that isn't the actual net which has a
        `predict`, but it's a wrapper (explained above), but we can access the
        true network via `self.sn.sdect` and from there call `predict`.
        """
        print("\nNow in `test_success` to check success net...")
        time.sleep(3)
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()
        if np.isnan(np.sum(d_img)):
            cv2.patchNaNs(d_img, 0.0)
        d_img = depth_to_net_dim(d_img, robot='HSR')
        result = self.sn.sdect.predict(np.copy(d_img))
        result = np.squeeze(result)

        print("s-net pred: {} (if [0]<[1] failure, else success...)".format(
            result))
        caption = 'S Predicted: {} (ESC to abort, other key to proceed)'.format(
            result)
        cv2.imshow(caption, d_img)
        key = cv2.waitKey(0)
        if key in ESC_KEYS:
            print("Pressed ESC key. Terminating program...")
            sys.exit()

    def _test_variables(self):
        """Test to see if TF variables were loaded correctly.
        """
        vars = tf.trainable_variables()
        print("\ntf.trainable_variables:")
        for vv in vars:
            print("  {}".format(vv))
        print("done\n")
Example #13
class BedMaker():
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero entries of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def lego_demo(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if not DEBUG:
            self.position_head()
        b = time.time()
        while True:

            time.sleep(1)  #making sure the robot is finished moving

            a = time.time()
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            print "time to get images", time.time() - a
            print "\n new iteration"
            if c_img is not None and d_img is not None:

                c_ms, dirs, _ = run_connected_components(c_img)
                img = draw(c_img, c_ms, dirs)

                # # IPython.embed()
                for c_m, direction in zip(c_ms, dirs):
                    pose, rot = self.compute_grasp(c_m, direction, d_img)
                    rot -= pi / 2.0
                    print "pose, rot:", pose, rot

                ####DETERMINE WHAT OBJECT TO GRASP

                grasp_name = self.gripper.get_grasp_pose(pose[0],
                                                         pose[1],
                                                         pose[2],
                                                         rot,
                                                         c_img=c_img)

                self.execute_grasp(grasp_name)

                self.whole_body.move_to_go()
                self.position_head()

    def execute_grasp(self, grasp_name):
        self.gripper.open_gripper()

        self.whole_body.end_effector_frame = 'hand_palm_link'

        self.whole_body.move_end_effector_pose(geometry.pose(), grasp_name)

        self.gripper.close_gripper()
        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.1),
                                               grasp_name)

        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.1),
                                               'head_down')

        self.gripper.open_gripper()

    def compute_grasp(self, c_m, direction, d_img):
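        # c_m is a (row, col) center of mass from run_connected_components;
        # direction selects one of two wrist rotations (0 or ~pi/2), and z is
        # the mean depth over a 40x40 window around the grasp point.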

        if direction:
            rot = 0.0
        else:
            rot = 1.57

        x = c_m[1]
        y = c_m[0]

        z_box = d_img[y - 20:y + 20, x - 20:x + 20]

        z = self.gp.find_mean_depth(z_box)

        return [x, y, z], rot

    def singulate(self, start, end, c_img, d_img):
        # [355.9527559055119, 123.53543307086613, 977.26812500000005] 0.0
        rot = np.pi / 2 + np.arctan2(end[0] - start[0], end[1] - start[1])

        self.gripper.close_gripper()
        # self.go_to_point(start, rot, c_img, d_img)
        # self.go_to_point(end, rot, c_img, d_img)

        y, x = start
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        # above_start_pose_name = self.gripper.get_grasp_pose(x,y,z,rot,c_img=c_img)
        start_pose_name = self.gripper.get_grasp_pose(x,
                                                      y,
                                                      z,
                                                      rot,
                                                      c_img=c_img)

        y, x = end
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        end_pose_name = self.gripper.get_grasp_pose(x, y, z, rot, c_img=c_img)

        # raw_input("Click enter to move to " + above_start_pose_name)
        # self.whole_body.move_end_effector_pose(geometry.pose(), start_pose_name)
        # raw_input("Click enter to singulate from " + start_pose_name)
        print "singulating", start_pose_name
        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.05),
                                               start_pose_name)
        self.whole_body.move_end_effector_pose(geometry.pose(z=-.01),
                                               start_pose_name)
        # raw_input("Click enter to singulate to " + end_pose_name)
        print "singulating", end_pose_name
        self.whole_body.move_end_effector_pose(geometry.pose(z=-.01),
                                               end_pose_name)

        self.gripper.open_gripper()

    def go_to_point(self, point, rot, c_img, d_img):
        y, x = point
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        print "singulation pose:", x, y, z
        pose_name = self.gripper.get_grasp_pose(x, y, z, rot, c_img=c_img)
        raw_input("Click enter to move to " + pose_name)
        self.whole_body.move_end_effector_pose(geometry.pose(), pose_name)

    def position_head(self):

        self.tt.move_to_pose(self.omni_base, 'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
class BedMaker():
    def __init__(self):
        '''
        Initialization for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.wl = Python_Labeler(cam=self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ds = data_saver('tpc_rollouts/rollouts')

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body,
                                   self.omni_base, self.tt)

        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean depth over the nonzero entries of d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def lego_demo(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if not DEBUG:
            self.gm.position_head()

        while True:
            time.sleep(1)  #making sure the robot is finished moving
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            print "\n new iteration"

            if c_img is not None and d_img is not None:
                #label image
                data = self.wl.label_image(c_img)
                c_img = self.cam.read_color_data()

                grasp_boxes = []
                suction_boxes = []
                singulate_boxes = []

                for i in range(data['num_labels']):
                    bbox = data['objects'][i]['box']
                    classnum = data['objects'][i]['class']
                    classname = ['grasp', 'singulate', 'suction',
                                 'quit'][classnum]
                    if classname == "grasp":
                        grasp_boxes.append(bbox)
                    elif classname == "suction":
                        suction_boxes.append(bbox)
                    elif classname == "singulate":
                        singulate_boxes.append(bbox)
                    elif classname == "quit":
                        self.ds.save_rollout()
                        self.gm.move_to_home()
                self.ds.append_data_to_rollout(c_img, data)
                main_mask = crop_img(c_img)
                col_img = ColorImage(c_img)
                workspace_img = col_img.mask_binary(main_mask)

                grasps = []
                viz_info = []
                for i in range(len(grasp_boxes)):
                    bbox = grasp_boxes[i]
                    center_mass, direction = bbox_to_grasp(bbox, c_img, d_img)

                    viz_info.append([center_mass, direction])
                    pose, rot = self.gm.compute_grasp(center_mass, direction,
                                                      d_img)
                    grasps.append(
                        self.gripper.get_grasp_pose(pose[0],
                                                    pose[1],
                                                    pose[2],
                                                    rot,
                                                    c_img=workspace_img.data))

                suctions = []
                for i in range(len(suction_boxes)):
                    suctions.append("compute_suction?")

                if len(grasps) > 0 or len(suctions) > 0:
                    cv2.imwrite(
                        "grasps.png",
                        visualize(workspace_img, [v[0] for v in viz_info],
                                  [v[1] for v in viz_info]))
                    print "running grasps"

                    for grasp in grasps:
                        print "grasping", grasp
                        self.gm.execute_grasp(grasp)
                    print "running suctions"
                    for suction in suctions:
                        print "suctioning", suction
                        #execute suction
                elif len(singulate_boxes) > 0:
                    print("singulating")
                    bbox = singulate_boxes[0]
                    obj_mask = bbox_to_mask(bbox, c_img)
                    start, end = find_singulation(col_img, main_mask, obj_mask)
                    display_singulation(start, end, workspace_img)

                    self.gm.singulate(start, end, c_img, d_img)
                else:
                    print("cleared workspace")

                self.whole_body.move_to_go()
                self.gm.position_head()
class LabelDemo():
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"

    def bbox_to_fg(self, bbox, c_img, col_img):
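        # bbox is [xmin, ymin, xmax, ymax]; bycoords reorders it to
        # [ymin, ymax, xmin, xmax] for crop_img, then the crop is masked
        # against the workspace and its foreground extracted.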
        obj_mask = crop_img(c_img,
                            bycoords=[bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL,
                                               ignore_black=True)
        return fg, obj_workspace_img

    def test_bbox_overlap(self, box_a, box_b):
        #bbox has format [xmin, ymin, xmax, ymax]
        if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
            return False
        if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
            return False
        return True

    def find_isolated_objects(self, bboxes, c_img):
        valid_bboxes = []
        for curr_ind in range(len(bboxes)):
            curr_bbox = bboxes[curr_ind]
            overlap = False
            # A box is isolated only if it overlaps no other box, so compare
            # against every other bbox, not just the ones after it.
            for test_ind in range(len(bboxes)):
                if test_ind == curr_ind:
                    continue
                test_bbox = bboxes[test_ind]
                if self.test_bbox_overlap(curr_bbox, test_bbox):
                    overlap = True
                    break
            if not overlap:
                valid_bboxes.append(curr_bbox)
        return valid_bboxes

    def label_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            obj = labels['objects'][0]
            bbox = obj['box']
            class_label = obj['class']

            #bbox has format [xmin, ymin, xmax, ymax]
            fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
            cv2.imwrite("debug_imgs/test.png", obj_w.data)

            groups = get_cluster_info(fg)
            display_grasps(workspace_img, groups)

            group = groups[0]
            pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
            grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                     pose[1],
                                                     pose[2],
                                                     rot,
                                                     c_img=workspace_img.data)

            self.gm.execute_grasp(grasp_pose, class_num=class_label)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
class LabelDemo():

    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.br = tf.TransformBroadcaster()
        self.rgbd_map = RGBD2Map(self.br)
        # IPython.embed()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)
        self.grasp_count = 0
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('suction'))
        self.gm = GraspManipulator(self.gp, self.gripper, self.suction, self.whole_body, self.omni_base, self.tl)
        self.web = Web_Labeler()

        thread.start_new_thread(self.broadcast_temp_bin,())

        time.sleep(3)
        print "after thread"

    def broadcast_temp_bin(self):
        while True:
            self.br.sendTransform((1, 1, 0.6),
                    tf.transformations.quaternion_from_euler(ai=0.0,aj=1.57,ak=0.0),
                    rospy.Time.now(),
                    'temp_bin',
                    # 'head_rgbd_sensor_link')
                    'map')
            rospy.sleep(1)

    def bbox_to_fg(self, bbox, c_img, col_img):
        obj_mask = crop_img(c_img, bycoords = [bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL, ignore_black=True)
        return fg, obj_workspace_img

    def test_bbox_overlap(self, box_a, box_b):
        #bbox has format [xmin, ymin, xmax, ymax]
        if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
            return False
        if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
            return False 
        return True 

    def find_isolated_objects(self, bboxes, c_img):
        valid_bboxes = []
        for curr_ind in range(len(bboxes)):
            curr_bbox = bboxes[curr_ind]
            overlap = False
            # A box is isolated only if it overlaps no other box, so compare
            # against every other bbox, not just the ones after it.
            for test_ind in range(len(bboxes)):
                if test_ind == curr_ind:
                    continue
                test_bbox = bboxes[test_ind]
                if self.test_bbox_overlap(curr_bbox, test_bbox):
                    overlap = True
                    break
            if not overlap:
                valid_bboxes.append(curr_bbox)
        return valid_bboxes

    def label_demo(self):
        """ Main method which executes the stuff we're interested in.
        
        Should apply to both the physical and simulated HSRs. Call as `python
        main/test_labeling.py`.
        """
        self.gm.position_head()
        time.sleep(3) #making sure the robot is finished moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        path = "/home/ron/siemens_sim/siemens_challenge/debug_imgs/web.png"
        cv2.imwrite(path, c_img)
        time.sleep(2) #make sure new image is written before being read

        # print "\n new iteration"
        main_mask = crop_img(c_img, simple=True)
        col_img = ColorImage(c_img)
        workspace_img = col_img.mask_binary(main_mask)

        labels = self.web.label_image(path)

        obj = labels['objects'][0]
        bbox = obj['box']
        class_label = obj['class']

        #bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
        cv2.imwrite("debug_imgs/test.png", obj_w.data)

        groups = get_cluster_info(fg)
        display_grasps(workspace_img, groups)

        group = groups[0]
        print(d_img)
        pose,rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        pose = find_pose(pose)
        if pose is None:
            print("unable to find corresponding item.")
            sys.exit()

        a = tf.transformations.quaternion_from_euler(ai=-2.355,aj=-3.14,ak=0.0)
        b = tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=1.57)

        base_rot = tf.transformations.quaternion_multiply(a,b)

        print("now about to get grasp pose, w/pose: {}, rot: {}".format(pose, rot))
        thread.start_new_thread(self.gripper.loop_broadcast,(pose,base_rot,rot))
        time.sleep(5)
        print("now calling execute_grasp w/grasp_pose: {}".format(grasp_pose))
        # IPython.embed()
        self.gm.execute_grasp("grasp_0")
        self.whole_body.move_end_effector_pose(geometry.pose(),"temp_bin")
        self.gripper.open_gripper()

        #reset to start
        self.whole_body.move_to_go()
        # self.gm.move_to_home()
        self.gm.position_head()
        time.sleep(3)
class FullWebDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"

    def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img):
        print("grasping a " + cfg.labels[bbox.label])
        #bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = bbox.to_mask(c_img, col_img)
        # cv2.imwrite("debug_imgs/test.png", obj_w.data)
        # cv2.imwrite("debug_imgs/test2.png", fg.data)
        groups = get_cluster_info(fg)
        curr_tol = cfg.COLOR_TOL
        while len(groups) == 0 and curr_tol > 10:
            curr_tol -= 5
            #retry with lower tolerance- probably white object
            fg, obj_w = bbox.to_mask(c_img, col_img, tol=curr_tol)
            groups = get_cluster_info(fg)

        if len(groups) == 0:
            print("No object within bounding box")
            return False

        display_grasps(workspace_img, groups)

        group = groups[0]
        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                 pose[1],
                                                 pose[2],
                                                 rot,
                                                 c_img=workspace_img.data)

        self.gm.execute_grasp(grasp_pose, class_num=bbox.label)

    def run_singulate(self, col_img, main_mask, to_singulate, d_img):
        print("singulating")
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()

        singulator.display_singulation()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)

    def full_web_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            objs = labels['objects']
            bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
            single_objs = find_isolated_objects(bboxes)

            if len(single_objs) > 0:
                to_grasp = select_first_obj(single_objs)
                self.run_grasp(to_grasp, c_img, col_img, workspace_img, d_img)
            else:
                #for accurate singulation should have bboxes for all
                fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                groups = merge_groups(groups, cfg.DIST_TOL)
                self.run_singulate(col_img, main_mask, groups, d_img)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
Beispiel #18
0
class BottlePicker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        self.RCNN = Depth_Object("bottle")
        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Compute the mean of the nonzero (valid) depth values in d_img.
        '''
        indx = np.nonzero(d_img)
        mean = np.mean(d_img[indx])
        return mean
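    # Hedged equivalence sketch (made-up values, not from the original code):
    # the nonzero-index mean above matches a boolean-mask mean, e.g.
    #   d = np.array([[0., 500.], [700., 0.]])
    #   np.mean(d[np.nonzero(d)])  # 600.0
    #   np.mean(d[d > 0])          # 600.0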

    def move_to_top_side(self):
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'right_up')

    def bottle_pick(self):

        # self.rollout_data = []
        self.position_head()

        self.move_to_top_side()
        print("ARRIVED AT TOP SIDE")
        time.sleep(2)

        #cycle through positions for a long time (30)
        pose_num = 0
        pose_sequence = ['top_mid_far', 'top_left_far', 'top_mid']
        while pose_num < 30:
            pose_name = pose_sequence[pose_num % len(pose_sequence)]
            self.tt.move_to_pose(self.omni_base, pose_name)
            print("ARRIVED AT POSE " + pose_name)
            pose_num += 1

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:
                centers, out_img = self.RCNN.detect(c_img)

                # if self.get_new_grasp:
                #     c_m, dirs = run_connected_components(c_img)
                #     draw(c_img,c_m,dirs)

                #     c_img = self.cam.read_color_data()
                #     d_img = self.cam.read_depth_data()

                #     self.gripper.find_pick_region_cc(c_m[0],dirs[0],c_img,d_img,self.grasp_count)

                # pick_found,bed_pick = self.check_card_found()

                # self.gripper.execute_grasp(bed_pick,self.whole_body,'head_down')

                # self.grasp_count += 1
                # self.whole_body.move_to_go()
                # self.tt.move_to_pose(self.omni_base,'lower_start')
                # time.sleep(1)
                # self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.8})

                print("DETECTED: " + str(centers))
                cv2.imwrite("debug_imgs/debug" + str(pose_num) + ".png",
                            out_img)
            time.sleep(5)

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        try:

            for transform in transforms:
                print transform
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print 'got here'
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)

        except:
            rospy.logerr('bed pick not found yet')

        return True, cards

    def position_head(self):

        self.tt.move_to_pose(self.omni_base, 'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
class BedMaker():
    def __init__(self, args):
        """For data collection of bed-making, NOT the deployment.

        Assumes we roll out the robot's policy via code (not via human touch).
        This is the 'slower' way where we have the python interface that the
        human clicks on to indicate grasping points. Good news is, our deployment
        code is probably going to be similar to this.

        For joystick: you only need it plugged in for the initial state sampler,
        which (at the moment) we are not even using.
        """
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want).
        self.view_mode = cfg.VIEW_MODE

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0

        # Bells and whistles; note the 'success check' used to decide whether to transition.
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        time.sleep(4)
        print(
            "Finished creating BedMaker()! Get the bed set up and run bed-making!"
        )
        if cfg.INS_SAMPLE:
            print("TODO: we don't have sampling code here.")

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

    def bed_make(self):
        """Runs the pipeline for data collection.

        You can run this for multiple bed-making trajectories.
        For now, though, assume one call to this means one trajectory.
        """
        self.rollout_data = []
        self.get_new_grasp = True

        # I think, creates red line in GUI where we adjust the bed to match it.
        # But in general we better fix our sampler before doing this for real.
        # Don't forget to press 'B' on the joystick to get past this screen.
        if cfg.INS_SAMPLE:
            u_c, d_c = self.ins.sample_initial_state()
            self.rollout_data.append([u_c, d_c])

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:
                if self.get_new_grasp:
                    self.position_head()

                    # Human supervisor labels. data = dictionary of relevant info
                    data = self.wl.label_image(c_img)
                    c_img = self.cam.read_color_data()
                    d_img = self.cam.read_depth_data()
                    self.add_data_point(c_img, d_img, data, self.side, 'grasp')

                    # Broadcasts grasp pose
                    self.gripper.find_pick_region_labeler(
                        data, c_img, d_img, self.grasp_count)

                # Execute the grasp and check for success. But if VIEW_MODE is
                # close, better to reset to a 'nicer' position for base movement.
                pick_found, bed_pick = self.check_card_found()
                if self.side == "BOTTOM":
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'lower_start')
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                else:
                    self.whole_body.move_to_go()
                    self.tt.move_to_pose(self.omni_base, 'top_mid')
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')
                self.check_success_state()

    def check_success_state(self):
        """
        Checks whether a single grasp in a bed-making trajectory succeeded.
        Depends on which side of the bed the HSR is at. Invokes human supervisor
        and transitions the HSR if successful.
        """
        if self.side == "BOTTOM":
            success, data = self.sc.check_bottom_success(self.wl)
        else:
            success, data = self.sc.check_top_success(self.wl)
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()
        self.add_data_point(c_img, d_img, data, self.side, 'success')
        print("WAS SUCCESFUL: {}".format(success))

        # Handle transitioning to different side
        if success:
            if self.side == "BOTTOM":
                self.transition_to_top()
            else:
                self.transition_to_start()
            self.update_side()
            self.grasp_count += 1
            self.get_new_grasp = True
        else:
            self.grasp_count += 1
            # If the grasp failed, invoke the region finder again and add a new data point.
            self.gripper.find_pick_region_labeler(data, c_img, d_img,
                                                  self.grasp_count)
            self.add_data_point(c_img, d_img, data, self.side, 'grasp')
            self.get_new_grasp = False

    def update_side(self):
        """TODO: extend to multiple side switches?"""
        if self.side == "BOTTOM":
            self.side = "TOP"

    def transition_to_top(self):
        """Transition to top (not bottom)."""
        self.move_to_top_side()

    def transition_to_start(self):
        """Transition to start=bottom, save rollout data, exit program.
        Saves to a supervisor's directory since we're using a supervisor.
        """
        self.com.save_rollout(self.rollout_data)
        self.move_to_start()
        sys.exit()

    def add_data_point(self, c_img, d_img, data, side, typ, pose=None):
        """Adds a dictionary to the `rollout_data` list."""
        grasp_point = {}
        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img
        if pose is None:
            label = data['objects'][0]['box']
            pose = [(label[2] - label[0]) / 2.0 + label[0],
                    (label[3] - label[1]) / 2.0 + label[1]]
        grasp_point['pose'] = pose
        grasp_point['class'] = data['objects'][0]['class']
        grasp_point['side'] = side
        grasp_point['type'] = typ
        self.rollout_data.append(grasp_point)

    def position_head(self):
        """Position the head for a grasp attempt.
        After playing around a bit, I think `head_tilt_joint` should be set last.
        """
        self.whole_body.move_to_go()
        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start_tmp')
        self.whole_body.move_to_joint_positions(
            {'arm_flex_joint': -np.pi / 16.0})
        self.whole_body.move_to_joint_positions(
            {'head_pan_joint': np.pi / 2.0})
        self.whole_body.move_to_joint_positions({'arm_lift_joint': 0.120})
        self.whole_body.move_to_joint_positions(
            {'head_tilt_joint': -np.pi / 4.0})

    def move_to_top_side(self):
        """Assumes we're at the bottom and want to go to the top."""
        self.whole_body.move_to_go()
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'right_mid')
        self.tt.move_to_pose(self.omni_base, 'right_up')
        self.tt.move_to_pose(self.omni_base, 'top_mid_tmp')

    def move_to_start(self):
        """Assumes we're at the top and we go back to the start."""
        self.whole_body.move_to_go()
        self.tt.move_to_pose(self.omni_base, 'right_up')
        self.tt.move_to_pose(self.omni_base, 'right_mid')
        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'lower_mid')

    def check_card_found(self):
        """Looks up the pose for where the HSR's hand should go to."""
        transforms = self.tl.getFrameStrings()
        cards = []
        try:
            for transform in transforms:
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print('found {}'.format(current_grasp))
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)
        except:
            rospy.logerr('bed pick not found yet')
        return True, cards
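    # Hedged alternative sketch (not the original approach): instead of scanning
    # getFrameStrings() for a matching name, one could wait for the exact frame
    # that the gripper's loop_broadcast() publishes and then look it up, e.g.
    #
    #   frame = 'bed_' + str(self.grasp_count)
    #   self.tl.waitForTransform('map', frame, rospy.Time(0), rospy.Duration(4.0))
    #   trans, rot = self.tl.lookupTransform('map', frame, rospy.Time(0))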
Beispiel #20
0
class Bed_Gripper(object):
    """
    Handles the gripper for bed-making. Similar to Crane_Gripper in that it
    creates poses for the robot's end-effector to go to, but adds functionality
    for the neural networks and Python labelers.
    """

    def __init__(self,graspPlanner,cam,options,gripper):
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'
        not_read = True
        while not_read:
            try:
                cam_info = cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except:
                rospy.logerr('info not received')
        # Bells and whistles
        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        self.gp = graspPlanner
        self.gripper = gripper
        self.options = options
        self.com = COM()

        # See paper for details, used to release grasp if too much force.
        self.tension = Tensioner()

        # Side, need to change this.
        self.side = 'BOTTOM'
        self.offset_x = 0.0 # positive means going to other side of bed (both sides!)
        self.offset_z = 0.0 # negative means going UP to ceiling
        self.apply_offset = False


    def compute_trans_to_map(self,norm_pose,rot):
        # Compose the grasp pose (given in the RGB-D camera frame) with the
        # current camera-to-map transform so it can be broadcast in 'map'.
        pose = self.tl.lookupTransform('map','rgbd_sensor_rgb_frame_map', rospy.Time(0))
        # T_map_camera as a homogeneous matrix.
        M = tf.transformations.quaternion_matrix(pose[1])
        M_t = tf.transformations.translation_matrix(pose[0])
        M[:,3] = M_t[:,3]
        # T_camera_grasp as a homogeneous matrix.
        M_g = tf.transformations.quaternion_matrix(rot)
        M_g_t = tf.transformations.translation_matrix(norm_pose)
        M_g[:,3] = M_g_t[:,3]
        # T_map_grasp = T_map_camera * T_camera_grasp.
        M_T = np.matmul(M,M_g)
        trans = tf.transformations.translation_from_matrix(M_T)
        quat = tf.transformations.quaternion_from_matrix(M_T)
        return trans,quat


    def loop_broadcast(self, norm_pose, rot, count):
        norm_pose, rot = self.compute_trans_to_map(norm_pose, rot)
        gripper_height = cfg.GRIPPER_HEIGHT

        # Bleh ... :-(. Pretend gripper height is shorter.  UPDATE: let's not do
        # this. It's a bad idea. Put tape under table legs for better balance.
        #if self.side == 'TOP':
        #    gripper_height -= 0.015

        # But we may want some extra offset in the x and z directions.
        if self.apply_offset:
            print("self.apply_offset = True")
            self.offset_x = 0.010
            self.offset_z = 0.010
        else:
            print("self.apply_offset = False")
            self.offset_x = 0.0
            self.offset_z = 0.0

        print("In loop_broadcast, broadcasting pose!!")

        while True:
            rospy.sleep(1.0)
            self.br.sendTransform((norm_pose[0], norm_pose[1], norm_pose[2]),
                    #tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=0.0),
                    rot,
                    rospy.Time.now(),
                    'bed_i_'+str(count),
                    #'head_rgbd_sensor_link')
                    'map')
            self.br.sendTransform((self.offset_x, 0.0, -gripper_height + self.offset_z),
                    tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=0.0),
                    rospy.Time.now(),
                    'bed_'+str(count),
                    #'head_rgbd_sensor_link')
                    'bed_i_'+str(count))

    
    def broadcast_poses(self,poses,g_count):
        #while True: 
        count = 0
        for pose in poses:
            num_pose = pose[1] # this is [x,y,depth]
            label = pose[0]
            td_points = self.pcm.projectPixelTo3dRay((num_pose[0],num_pose[1]))
            print("\nIn `bed_making.gripper.broadcast_poses()`")
            print("  DE PROJECTED POINTS {}".format(td_points))
            norm_pose = np.array(td_points)
            norm_pose = norm_pose/norm_pose[2]
            norm_pose = norm_pose*(cfg.MM_TO_M*num_pose[2])
            print("  NORMALIZED POINTS {}\n".format(norm_pose))
            #pose = np.array([td_points[0],td_points[1],0.001*num_pose[2]])
            a = tf.transformations.quaternion_from_euler(ai=-2.355,aj=-3.14,ak=0.0)
            b = tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=1.57)
            c = tf.transformations.quaternion_multiply(a,b)
            thread.start_new_thread(self.loop_broadcast,(norm_pose,c,g_count))
            time.sleep(1.0)
            count += 1
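    # Hedged sketch of the de-projection math in broadcast_poses() above, using
    # a plain pinhole model in place of image_geometry (intrinsics and depth are
    # made-up example values; cfg.MM_TO_M is assumed to be 0.001):
    #
    #   fx, fy, cx, cy = 537.0, 537.0, 320.0, 240.0
    #   u, v, depth_mm = 400.0, 300.0, 750.0
    #   # equivalent to projectPixelTo3dRay(...) followed by dividing by z:
    #   ray = np.array([(u - cx) / fx, (v - cy) / fy, 1.0])
    #   point_cam = ray * (0.001 * depth_mm)   # 3D point in the camera frame (m)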


    def convert_crop(self,pose):
        pose[0] = self.options.OFFSET_Y + pose[0]
        pose[1] = self.options.OFFSET_X + pose[1]
        return pose


    def plot_on_true(self, pose, true_img):
        """Another debug helper method, shows the img with cross hair."""
        #pose = self.convert_crop(pose)
        dp = DrawPrediction()
        image = dp.draw_prediction(np.copy(true_img), pose)
        cv2.imshow('label_given',image)
        cv2.waitKey(30)
       

    def find_pick_region_net(self, pose, c_img, d_img, count, side, apply_offset=None):
        """Called during bed-making deployment w/neural network, creates a pose.

        It relies on the raw depth image! DO NOT PASS A PROCESSED DEPTH IMAGE!!!
        Also, shows the image to the user via `plot_on_true`.

        Update: pass in the 'side' as well, because I am getting some weird
        cases where the plane formed by the four table 'corner' frames (head up,
        head down, bottom up, bottom down) seem to be slightly at an angle. Ugh.
        So pretend the gripper height has 'decreased' by 1cm for the other side
        of the bed.

        Args:
            pose: (x,y) point as derived from the grasp detector network
        """
        # temp debugging
        print('find_pick_region_net, here are the transforms')
        transforms = self.tl.getFrameStrings() 
        for transform in transforms:
            print("  {}".format(transform))
            #f_p = self.tl.lookupTransform('map',transform, rospy.Time(0))
        print("done w/transforms\n")
        # end tmp debugging
        
        assert side in ['BOTTOM', 'TOP']
        self.side = side
        self.apply_offset = apply_offset

        poses = []
        p_list = []
        x,y = pose
        print("in bed_making.gripper, PREDICTION {}".format(pose))
        # NOTE for now dont do this I have other ways to visualize
        #self.plot_on_true([x,y], c_img)

        # Crop D+img, take 10x10 area of raw depth
        d_img_c = d_img[int(y-cfg.BOX) : int(y+cfg.BOX) , int(x-cfg.BOX) : int(cfg.BOX+x)]

        depth = self.gp.find_mean_depth(d_img_c)
        poses.append( [1.0, [x,y,depth]] )
        print("pose: {}".format(poses))
        self.broadcast_poses(poses, count)


    def find_pick_region_labeler(self, results, c_img, d_img, count):
        """Called during bed-making data collection, only if we use the labeler.

        It relies on the raw depth image! DO NOT PASS A PROCESSED DEPTH IMAGE!!!
        Also, shows the image to the user via `plot_on_true`.

        NOTE: main difference between this and the other method for the net is
        that the `results` argument encodes the pose implicitly and we have to
        compute it. Otherwise, though, the depth computation is the same, and
        cropping is the same, so the methods do similar stuff.

        Args:
            results: Dict from QueryLabeler class (human supervisor).
        """
        poses = []
        p_list = []

        for result in results['objects']:
            print result
            x_min = float(result['box'][0])
            y_min = float(result['box'][1])
            x_max = float(result['box'][2])
            y_max = float(result['box'][3])
            x = (x_max-x_min)/2.0 + x_min
            y = (y_max - y_min)/2.0 + y_min

            # Will need to test; I assume this requires human intervention.
            if cfg.USE_DART:
                pose = np.array([x,y])
                action_rand = mvn(pose,cfg.DART_MAT)
                print "ACTION RAND ",action_rand
                print "POSE ", pose
                x = action_rand[0]
                y = action_rand[1]
            self.plot_on_true([x,y],c_img)

            #Crop D+img
            d_img_c = d_img[int(y-cfg.BOX) : int(y+cfg.BOX) , int(x-cfg.BOX) : int(cfg.BOX+x)]
            depth = self.gp.find_mean_depth(d_img_c)
            # Note that `result['class']` is an integer (a class index).
            # 0=success, anything else indicates a grasping failure.
            poses.append([result['class'],[x,y,depth]])
        self.broadcast_poses(poses,count)
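    # Hedged sketch of the DART-style label noise used above, assuming `mvn`
    # is numpy.random.multivariate_normal and cfg.DART_MAT is a 2x2 covariance
    # (the values below are made up for illustration):
    #
    #   label_px = np.array([320.0, 240.0])   # labeled grasp pixel (x, y)
    #   cov = np.diag([25.0, 25.0])           # ~5 px standard deviation per axis
    #   noisy_px = np.random.multivariate_normal(label_px, cov)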


    def find_pick_region(self,results,c_img,d_img):
        """ From suctioning code, not the bed-making. Ignore it. """
        poses = []
        p_list = []
        for result in results:
            print result
            x = int(result['box'][0])
            y = int(result['box'][1])
            w = int(result['box'][2]/ 2.0)
            h = int(result['box'][3]/ 2.0)
            self.plot_on_true([x,y],c_img)
            #Crop D+img
            d_img_c = d_img[y-h:y+h,x-w:x+w]
            depth = self.gp.find_max_depth(d_img_c)
            poses.append([result['class'],self.convert_crop([x,y,depth])])
        self.broadcast_poses(poses)


    def execute_grasp(self, cards, whole_body, direction):
        """ Executes grasp. Move to pose, squeeze, pull (w/tension), open. """
        whole_body.end_effector_frame = 'hand_palm_link'

        # Hmmm ... might help with frequent table bumping? Higher = more arm movement.
        whole_body.linear_weight = 60.0

        whole_body.move_end_effector_pose(geometry.pose(),cards[0])
        self.com.grip_squeeze(self.gripper)

        # Then after we grip, go back to the default value.
        whole_body.linear_weight = 3.0

        # Then we pull.
        self.tension.force_pull(whole_body,direction)
        self.com.grip_open(self.gripper)
class CollectData():
    def __init__(self):
        """ Class to run HSR lego task """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'
        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')
        print("finished initializing collect data class")

    def collect_data(self):
        """ 
		Run this a few times to check that the rgb images are reasonable.
		If not, rearrange the setup and try again. Delete any images saved after
		that, the run this "for real".
		"""
        self.gm.position_head()
        IMDIR_RGB = 'image_rgb/'
        IMDIR_DEPTH = 'image_depth/'
        time.sleep(5)  #making sure the robot is finished moving
        print("after calling gm.position_head() w/several second delay")
        print("\nwhole body joint positions:\n{}".format(
            self.whole_body.joint_positions))

        while True:
            num = len([x for x in os.listdir(IMDIR_RGB) if 'png' in x])
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imshow('rgb/image_raw', c_img)
            cv2.imshow('depth/image_raw', d_img)
            cv2.waitKey(30)  # imshow needs a waitKey call to actually render
            fname1 = IMDIR_RGB + 'rgb_raw_{}.png'.format(str(num).zfill(4))
            fname2 = IMDIR_DEPTH + 'depth_raw_{}.png'.format(str(num).zfill(4))
            cv2.imwrite(fname1, c_img)
            cv2.imwrite(fname2, d_img)
            print("just saved {} and {}. NOW REARRANGE SETUP!!".format(
                fname1, fname2))
            IPython.embed()  #re-arrange setup here
Beispiel #22
0
class BedMaker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.ins = InitialSampler(self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.g_detector = Analytic_Grasp()

        self.sn = Success_Net(self.whole_body, self.tt, self.cam,
                              self.omni_base)

        c_img = self.cam.read_color_data()

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Compute the mean of the nonzero (valid) depth values in d_img.
        '''
        indx = np.nonzero(d_img)
        mean = np.mean(d_img[indx])
        return mean

    def bed_make(self):

        self.rollout_stats = []
        self.get_new_grasp = True

        if cfg.INS_SAMPLE:
            u_c, d_c = self.ins.sample_initial_state()

            self.rollout_stats.append([u_c, d_c])

        self.new_grasp = True
        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:

                if self.new_grasp:
                    self.position_head()
                else:
                    self.new_grasp = True
                time.sleep(3)

                c_img = self.cam.read_color_data()
                d_img = self.cam.read_depth_data()

                #CHANGE HERE
                # Downscale by grasp_factor for the analytic grasp detector;
                # the predicted pixel is scaled back up by 3 below.
                grasp_factor = 3
                img_small = cv2.resize(np.copy(c_img), (640 / 3, 480 / 3))

                sgraspt = time.time()
                data = self.g_detector.get_grasp(img_small, grasp_factor)
                egraspt = time.time()
                print("Grasp predict time: " + str(egraspt - sgraspt))

                data = 3 * data
                IPython.embed()

                self.record_stats(c_img, d_img, data, self.side, 'grasp')

                self.gripper.find_pick_region_net(data, c_img, d_img,
                                                  self.grasp_count)

                pick_found, bed_pick = self.check_card_found()

                if self.side == "BOTTOM":
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_down')
                else:
                    self.gripper.execute_grasp(bed_pick, self.whole_body,
                                               'head_up')

                self.check_success_state(c_img, d_img)

    def check_success_state(self, c_img, d_img):

        if self.side == "BOTTOM":
            success, data, c_img = self.sn.check_bottom_success(self.wl)
        else:
            success, data, c_img = self.sn.check_top_success(self.wl)

        self.record_stats(c_img, d_img, data, self.side, 'success')

        print "WAS SUCCESFUL: "
        print success
        if (success):

            if self.side == "BOTTOM":
                self.transition_to_top()
            else:
                self.transition_to_start()

            self.update_side()
        else:
            self.new_grasp = False

        self.grasp_count += 1

        if self.grasp_count > cfg.GRASP_OUT:
            self.transition_to_start()

    def update_side(self):

        if self.side == "BOTTOM":
            self.side = "TOP"

    def transition_to_top(self):
        if cfg.DEBUG_MODE:
            self.com.save_stat(self.rollout_stats)
            self.tt.move_to_pose(self.omni_base, 'lower_mid')
            sys.exit()
        else:
            self.move_to_top_side()

    def transition_to_start(self):
        self.com.save_stat(self.rollout_stats)
        self.move_to_start()
        sys.exit()

    def record_stats(self, c_img, d_img, data, side, typ):

        grasp_point = {}

        grasp_point['c_img'] = c_img
        grasp_point['d_img'] = d_img

        if typ == "grasp":
            grasp_point['net_pose'] = data
        else:
            grasp_point['net_trans'] = data

        grasp_point['side'] = side
        grasp_point['type'] = typ

        self.rollout_stats.append(grasp_point)

    def position_head(self):

        if self.side == "TOP":
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
        elif self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def move_to_top_side(self):

        self.tt.move_to_pose(self.omni_base, 'right_down')

        self.tt.move_to_pose(self.omni_base, 'right_up')

        self.tt.move_to_pose(self.omni_base, 'top_mid')

    def move_to_start(self):

        if self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_mid')
        else:

            self.tt.move_to_pose(self.omni_base, 'right_up')

            self.tt.move_to_pose(self.omni_base, 'right_down')
            self.tt.move_to_pose(self.omni_base, 'lower_mid')

    def check_bottom_success(self):

        self.tt.move_to_pose(self.omni_base, 'lower_mid')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def check_card_found(self):
        time.sleep(1)
        # try:
        transforms = self.tl.getFrameStrings()

        cards = []

        try:

            for transform in transforms:
                print transform
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print 'got here'
                    f_p = self.tl.lookupTransform('map', transform,
                                                  rospy.Time(0))
                    cards.append(transform)

        except:
            rospy.logerr('bed pick not found yet')

        return True, cards
Beispiel #23
0
class BedMaker():

    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp,self.cam,self.com.Options,self.robot.get('gripper'))

        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self,d_img):
        '''
        Compute the mean of the nonzero (valid) depth values in d_img.
        '''
        indx = np.nonzero(d_img)
        mean = np.mean(d_img[indx])
        return mean


    def bed_make(self):

        self.rollout_data = []
        self.get_new_grasp = True

        self.position_head()
        while True:

            time.sleep(2)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            if c_img is not None and d_img is not None:

                if self.get_new_grasp:

                    c_m, dirs = run_connected_components(c_img)
                    draw(c_img,c_m,dirs)

                    c_img = self.cam.read_color_data()
                    d_img = self.cam.read_depth_data()

                    self.gripper.find_pick_region_cc(c_m[0],dirs[0],c_img,d_img,self.grasp_count)

                pick_found,bed_pick = self.check_card_found()

                self.gripper.execute_grasp(bed_pick,self.whole_body,'head_down')

                self.grasp_count += 1
                self.whole_body.move_to_go()
                self.tt.move_to_pose(self.omni_base,'lower_start')
                time.sleep(1)
                self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.8})
    

    def check_card_found(self):

        # try:
        transforms = self.tl.getFrameStrings()
    
        cards = []

        try:
        
            for transform in transforms:
                print transform
                current_grasp = 'bed_'+str(self.grasp_count)
                if current_grasp in transform:
                    print 'got here'
                    f_p = self.tl.lookupTransform('map',transform, rospy.Time(0))
                    cards.append(transform)

        except: 
            rospy.logerr('bed pick not found yet')
                

        return True, cards
    
    def position_head(self):

        self.tt.move_to_pose(self.omni_base,'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.8})
Beispiel #24
0
    def __init__(self, args):
        """For deploying the bed-making policy, not for data collection.

        We use all three variants (analytic, human, networks) here due to
        similarities in code structure.
        """
        self.args = args
        DEBUG = True

        # Set up the robot.
        self.robot = robot = hsrb_interface.Robot()
        if DEBUG:
            print("finished: hsrb_interface.Robot()...")
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        if DEBUG:
            print("finished: robot.get(omni_base)...")
        self.whole_body = self.robot.get('whole_body')
        if DEBUG:
            print("finished: robot.get(whole_body)...")
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # Set up initial state, table, etc. Don't forget view mode!
        self.view_mode = BED_CFG.VIEW_MODE
        self.com.go_to_initial_state(self.whole_body)
        if DEBUG:
            print("finished: go_to_initial_state() ...")
        self.tt = TableTop()
        if DEBUG:
            print("finished: TableTop()...")

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0
        self.b_grasp_count = 0
        self.t_grasp_count = 0

        # AH, build the YOLO network beforehand.
        g_cfg = BED_CFG.GRASP_CONFIG
        s_cfg = BED_CFG.SUCC_CONFIG
        self.yc = YOLO_CONV(options=s_cfg)
        self.yc.load_network()

        # Policy for grasp detection, using Deep Imitation Learning.
        # Or, actually, sometimes we will use humans or an analytic version.
        if DEBUG:
            self._test_variables()
        print("\nnow forming the GDetector with type {}".format(args.g_type))
        if args.g_type == 'network':
            self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc)
        elif args.g_type == 'analytic':
            self.g_detector = Analytic_Grasp()  # TODO not implemented!
        elif args.g_type == 'human':
            print("Using a human, don't need to have a `g_detector`. :-)")

        if DEBUG:
            self._test_variables()
            print("\nnow making success net")
        self.sn = Success_Net(self.whole_body,
                              self.tt,
                              self.cam,
                              self.omni_base,
                              fg_cfg=s_cfg,
                              bed_cfg=BED_CFG,
                              yc=self.yc)

        # Bells and whistles.
        self.br = TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.dp = DrawPrediction()

        # When we start, do rospy.spin() to check the frames (phase 1). Then re-run.
        # The current hack we have to get around crummy AR marker detection. :-(
        if DEBUG:
            self._test_variables()
        print("Finished with init method")
        time.sleep(4)
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

        # For evaluating coverage.
        self.img_start = None
        self.img_final = None
        self.img_start2 = None
        self.img_final2 = None

        # For grasp offsets.
        self.apply_offset = False
Beispiel #25
0
class Lego_Gripper(object):
    def __init__(self, graspPlanner, cam, options, gripper):
        #topic_name = '/hsrb/head_rgbd_sensor/depth_registered/image_raw'

        not_read = True
        while not_read:

            try:
                cam_info = cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except:
                rospy.logerr('info not received')

        self.pcm = PCM()

        self.pcm.fromCameraInfo(cam_info)
        self.br = tf.TransformBroadcaster()
        self.tl = tf.TransformListener()
        self.gp = graspPlanner
        self.gripper = gripper
        self.options = options
        self.com = COM()

        self.tension = Tensioner()

    def compute_trans_to_map(self, norm_pose, rot):

        pose = self.tl.lookupTransform('map', 'rgbd_sensor_rgb_frame_map',
                                       rospy.Time(0))

        M = tf.transformations.quaternion_matrix(pose[1])
        M_t = tf.transformations.translation_matrix(pose[0])
        M[:, 3] = M_t[:, 3]

        M_g = tf.transformations.quaternion_matrix(rot)
        M_g_t = tf.transformations.translation_matrix(norm_pose)
        M_g[:, 3] = M_g_t[:, 3]

        M_T = np.matmul(M, M_g)

        trans = tf.transformations.translation_from_matrix(M_T)

        quat = tf.transformations.quaternion_from_matrix(M_T)

        return trans, quat

    def loop_broadcast(self, norm_pose, rot, count, rot_object):

        norm_pose, rot = self.compute_trans_to_map(norm_pose, rot)

        while True:
            self.br.sendTransform(
                (norm_pose[0], norm_pose[1], norm_pose[2]),
                #tf.transformations.quaternion_from_euler(ai=0.0,aj=0.0,ak=0.0),
                rot,
                rospy.Time.now(),
                'bed_i_' + str(count),
                #'head_rgbd_sensor_link')
                'map')

            if rot_object:
                self.br.sendTransform(
                    (0.0, 0.0, -cfg.GRIPPER_HEIGHT),
                    tf.transformations.quaternion_from_euler(ai=0.0,
                                                             aj=0.0,
                                                             ak=1.57),
                    rospy.Time.now(),
                    'bed_' + str(count),
                    #'head_rgbd_sensor_link')
                    'bed_i_' + str(count))
            else:
                self.br.sendTransform(
                    (0.0, 0.0, -cfg.GRIPPER_HEIGHT),
                    tf.transformations.quaternion_from_euler(ai=0.0,
                                                             aj=0.0,
                                                             ak=0.0),
                    rospy.Time.now(),
                    'bed_' + str(count),
                    #'head_rgbd_sensor_link')
                    'bed_i_' + str(count))

    def broadcast_poses(self, poses, g_count):
        #while True:

        count = 0

        for pose in poses:

            num_pose = pose[1]
            label = pose[0]

            td_points = self.pcm.projectPixelTo3dRay(
                (num_pose[0], num_pose[1]))
            print "DE PROJECTED POINTS ", td_points
            norm_pose = np.array(td_points)
            norm_pose = norm_pose / norm_pose[2]
            norm_pose = norm_pose * (cfg.MM_TO_M * num_pose[2])
            print "NORMALIZED POINTS ", norm_pose

            #pose = np.array([td_points[0],td_points[1],0.001*num_pose[2]])
            a = tf.transformations.quaternion_from_euler(ai=-2.355,
                                                         aj=-3.14,
                                                         ak=0.0)
            b = tf.transformations.quaternion_from_euler(ai=0.0,
                                                         aj=0.0,
                                                         ak=1.57)

            c = tf.transformations.quaternion_multiply(a, b)

            thread.start_new_thread(self.loop_broadcast,
                                    (norm_pose, c, g_count, label))

            time.sleep(0.3)

            count += 1

    def convert_crop(self, pose):

        pose[0] = self.options.OFFSET_Y + pose[0]
        pose[1] = self.options.OFFSET_X + pose[1]

        return pose

    def plot_on_true(self, pose, true_img):

        #pose = self.convert_crop(pose)

        dp = DrawPrediction()

        image = dp.draw_prediction(np.copy(true_img), pose)

        cv2.imshow('label_given', image)

        cv2.waitKey(30)

    def find_pick_region_net(self, pose, c_img, d_img, count):
        '''
        Computes a grasp pose from the network's pixel prediction and
        broadcasts it via tf.
        '''

        poses = []
        #IPython.embed()

        p_list = []

        y, x = pose
        #Crop D+img
        print "PREDICTION ", pose

        self.plot_on_true([x, y], c_img)

        d_img_c = d_img[y - cfg.BOX:y + cfg.BOX, x - cfg.BOX:cfg.BOX + x]

        depth = self.gp.find_mean_depth(d_img_c)

        poses.append([1.0, [x, y, depth]])

        self.broadcast_poses(poses, count)

    def find_pick_region_cc(self, pose, rot, c_img, d_img, count):
        '''
        Computes a grasp pose from a connected-components detection (center
        pixel plus rotation flag) and broadcasts it via tf.
        '''

        poses = []
        #IPython.embed()

        p_list = []
        y, x = pose
        self.plot_on_true([x, y], c_img)

        #Crop D+img
        d_img_c = d_img[y - cfg.BOX:y + cfg.BOX, x - cfg.BOX:cfg.BOX + x]

        depth = self.gp.find_mean_depth(d_img_c)

        poses.append([rot, [x, y, depth]])

        self.broadcast_poses(poses, count)

    def find_pick_region_labeler(self, results, c_img, d_img, count):
        '''
        Computes grasp poses from the labeler's bounding boxes (using the box
        centers) and broadcasts them via tf.
        '''

        poses = []
        #IPython.embed()

        p_list = []
        for result in results['objects']:
            print result

            x_min = float(result['box'][0])
            y_min = float(result['box'][1])
            x_max = float(result['box'][2])
            y_max = float(result['box'][3])

            x = (x_max - x_min) / 2.0 + x_min
            y = (y_max - y_min) / 2.0 + y_min

            if cfg.USE_DART:
                pose = np.array([x, y])
                action_rand = mvn(pose, cfg.DART_MAT)

                print "ACTION RAND ", action_rand
                print "POSE ", pose

                x = action_rand[0]
                y = action_rand[1]

            self.plot_on_true([x, y], c_img)

            #Crop D+img
            d_img_c = d_img[y - cfg.BOX:y + cfg.BOX, x - cfg.BOX:cfg.BOX + x]

            depth = self.gp.find_mean_depth(d_img_c)

            poses.append([result['class'], [x, y, depth]])

        self.broadcast_poses(poses, count)

    def execute_grasp(self, cards, whole_body, direction):

        whole_body.end_effector_frame = 'hand_palm_link'
        nothing = True

        #self.whole_body.move_to_neutral()
        #whole_body.linear_weight = 99.0
        whole_body.move_end_effector_pose(geometry.pose(), cards[0])

        self.com.grip_squeeze(self.gripper)

        whole_body.move_end_effector_pose(geometry.pose(z=-0.1), 'head_down')

        self.com.grip_open(self.gripper)
Beispiel #26
0
class BedMaker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        #PARAMETERS TO CHANGE
        self.side = 'TOP'

        self.r_count = 0

        self.grasp_count = 0
        self.success_count = 0

        self.true_count = 0
        self.grasp = True

        self.r_count = self.get_rollout_number()

        self.cam = RGBD()
        self.com = COM()

        self.joystick = JoyStick_X(self.com)

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.position_head()

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        #self.test_current_point()
        time.sleep(4)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def get_rollout_number(self):

        if self.side == "BOTTOM":
            rollouts = glob.glob(cfg.FAST_PATH + 'b_grasp/*.png')
        else:
            rollouts = glob.glob(cfg.FAST_PATH + 't_grasp/*.png')
        r_nums = []
        for r in rollouts:
            # NOTE: the slice below assumes a fixed-length path prefix so that
            # r[56:] starts at the rollout number in 'frame_<rollout>_<count>.png'.
            a = r[56:]
            i = a.find('_')
            r_num = int(a[:i])
            r_nums.append(r_num)

        # Start at rollout 0 if no images have been saved yet (max() would
        # otherwise raise on an empty list).
        if len(r_nums) == 0:
            return 0
        return max(r_nums) + 1
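    # Hedged sketch (not from the original code): a path-length-independent way
    # to recover the rollout number from the same 'frame_<rollout>_<count>.png'
    # naming scheme used by save_image() below, using only the basename:
    #
    #   import os, re
    #   def rollout_num(path):
    #       m = re.match(r'frame_(\d+)_\d+\.png$', os.path.basename(path))
    #       return int(m.group(1)) if m else None
    #
    #   rollout_num('/any/prefix/b_grasp/frame_12_3.png')  # -> 12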

    def position_head(self):

        if self.side == "TOP":
            self.tt.move_to_pose(self.omni_base, 'right_down')
            self.tt.move_to_pose(self.omni_base, 'right_up')

            self.tt.move_to_pose(self.omni_base, 'top_mid')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
        elif self.side == "BOTTOM":
            self.tt.move_to_pose(self.omni_base, 'lower_start')
            self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def collect_data_bed(self):

        while True:
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            cv2.imshow('video_feed', c_img)
            cv2.waitKey(30)

            cur_recording = self.joystick.get_record_actions_passive()
            if (cur_recording[0] < -0.1 and self.true_count % 20 == 0):
                print "PHOTO SNAPPED "
                self.save_image(c_img)

                if self.grasp:
                    self.grasp_count += 1
                    self.grasp = False
                else:
                    self.success_count += 1
                    self.grasp = True

            if (cur_recording[1] < -0.1 and self.true_count % 20 == 0):
                print "ROLLOUT DONE "
                self.r_count += 1
                self.grasp_count = 0
                self.success_count = 0
                self.grasp = True

            self.true_count += 1

    def save_image(self, c_img):

        if self.side == "BOTTOM":
            if self.grasp:
                cv2.imwrite(
                    cfg.FAST_PATH + 'b_grasp/frame_' + str(self.r_count) +
                    '_' + str(self.grasp_count) + '.png', c_img)
            else:
                cv2.imwrite(
                    cfg.FAST_PATH + 'b_success/frame_' + str(self.r_count) +
                    '_' + str(self.success_count) + '.png', c_img)

        else:
            if self.grasp:
                cv2.imwrite(
                    cfg.FAST_PATH + 't_grasp/frame_' + str(self.r_count) +
                    '_' + str(self.grasp_count) + '.png', c_img)
            else:
                cv2.imwrite(
                    cfg.FAST_PATH + 't_success/frame_' + str(self.r_count) +
                    '_' + str(self.success_count) + '.png', c_img)
Beispiel #27
0
class BedMaker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated yumi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point
            for debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Compute the mean of the non-zero depth readings in d_img.
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def bed_pick(self):

        while True:

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            if c_img is not None and d_img is not None:

                c_img = self.cam.read_color_data()
                d_img = self.cam.read_depth_data()

                data = self.wl.label_image(c_img)

                self.gripper.find_pick_region_labeler(data, c_img, d_img,
                                                      self.grasp_count)

                pick_found, bed_pick = self.check_card_found()

                self.grasp_count += 1

                if (pick_found):
                    if (self.side == 'BOTTOM'):
                        self.gripper.execute_grasp(bed_pick, self.whole_body,
                                                   'head_down')
                        success = self.sc.check_bottom_success(self.wl)

                        print "WAS SUCCESFUL: "
                        print success
                        if (success):
                            self.move_to_top_side()
                            self.side = "TOP"

                    elif (self.side == "TOP"):
                        self.gripper.execute_grasp(bed_pick, self.whole_body,
                                                   'head_up')
                        success = self.sc.check_top_success(self.wl)

                        print "WAS SUCCESFUL: "
                        print success

                        if (success):
                            self.side == "PILLOW"

    def test_current_point(self):

        self.gripper.tension.force_pull(self.whole_body, (0, 1, 0))
        self.gripper.com.grip_open(self.gripper)
        self.move_to_top_side()

    def move_to_top_side(self):

        self.tt.move_to_pose(self.omni_base, 'right_down')
        self.tt.move_to_pose(self.omni_base, 'right_mid')

        self.tt.move_to_pose(self.omni_base, 'right_up')

        self.tt.move_to_pose(self.omni_base, 'top_mid')

    def check_bottom_success(self):

        self.tt.move_to_pose(self.omni_base, 'lower_mid')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

    def check_card_found(self):

        transforms = self.tl.getFrameStrings()

        cards = []

        try:

            for transform in transforms:
                print transform
                current_grasp = 'bed_' + str(self.grasp_count)
                if current_grasp in transform:
                    print 'got here'
                    # lookup confirms the frame is resolvable; raises if the
                    # transform is not buffered yet
                    f_p = self.tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                                  transform, rospy.Time(0))
                    cards.append(transform)

        except Exception:
            rospy.logerr('bed pick not found yet')

        return len(cards) > 0, cards
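
check_card_found relies on a TF naming convention: some upstream component (presumably the gripper's find_pick_region_labeler) broadcasts a frame whose name contains 'bed_<grasp_count>', and this method scans the listener's known frames for it. A minimal sketch of that convention using the standard tf broadcaster/listener API; node name, translation, and rotation values are placeholders:

import rospy
import tf

# Hypothetical publisher side: advertise a grasp target as a TF frame named
# 'bed_0', parented to the head camera frame (numbers are placeholders).
rospy.init_node('bed_frame_demo')
br = tf.TransformBroadcaster()
tl = tf.TransformListener()

rate = rospy.Rate(10)
while not rospy.is_shutdown():
    br.sendTransform((0.1, 0.0, 0.6),               # translation (m)
                     (0.0, 0.0, 0.0, 1.0),          # quaternion
                     rospy.Time.now(),
                     'bed_0',                       # child frame
                     'head_rgbd_sensor_rgb_frame')  # parent frame

    # Consumer side, as in check_card_found: find the frame by name, then
    # look up its pose relative to the camera.
    matches = [f for f in tl.getFrameStrings() if 'bed_0' in f]
    if matches:
        try:
            pos, quat = tl.lookupTransform('head_rgbd_sensor_rgb_frame',
                                           matches[0], rospy.Time(0))
        except (tf.LookupException, tf.ExtrapolationException):
            pass  # frame advertised but transform not buffered yet
    rate.sleep()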
from il_ros_hsr.core.python_labeler import Python_Labeler

from il_ros_hsr.p_pi.bed_making.check_success import Success_Check
from il_ros_hsr.p_pi.bed_making.self_supervised import Self_Supervised
import il_ros_hsr.p_pi.bed_making.config_bed as cfg
import cPickle as pickle
import os
import glob
from il_ros_hsr.core.rgbd_to_map import RGBD2Map

from fast_grasp_detect.data_aug.draw_cross_hair import DrawPrediction

dp = DrawPrediction()
#latest, 46-49 from rollout_dart
sm = 0
com = COM()
wl = Python_Labeler()

# Meant to be run _after_ the `collect_data_bed_fast.py` script which saves these images.
for rnum in range(60, 65):
    # path = cfg.STAT_PATH+'stat_' + str(rnum) + '/rollout.p'

    b_grasps = glob.glob(cfg.FAST_PATH + 'b_grasp/*_' + str(rnum) + '_*')
    b_success = glob.glob(cfg.FAST_PATH + 'b_success/*_' + str(rnum) + '_*')

    t_grasps = glob.glob(cfg.FAST_PATH + 't_grasp/*_' + str(rnum) + '_*')
    t_success = glob.glob(cfg.FAST_PATH + 't_success/*_' + str(rnum) + '_*')

    rollout_data = []
    print "------GRASP-----------"
    for grasp in b_grasps:
        # per-grasp processing truncated in the source listing
        pass


if __name__ == "__main__":

    robot = hsrb_interface.Robot()

    omni_base = robot.get('omni_base')
    whole_body = robot.get('whole_body')

    com = COM()
    com.go_to_initial_state(whole_body)

    tt = Workspace()
    tt.register(robot)

    IPython.embed()
    tt.move_to_pose(omni_base, 'lower_mid')
    # tt.move_to_pose(omni_base, 'right_corner')
    # tt.move_to_pose(omni_base, 'right_mid')

class LegoDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"

    def find_mean_depth(self, d_img):
        """
        Compute the mean of the non-zero depth readings in d_img.
        """

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def get_success(self, action):
        """
        Parameters
        ----------
        action : str
            action to query success of
        Returns
        -------
        :str: y or n
        """
        if cfg.COLLECT_DATA and cfg.QUERY:
            print("Was " + action + " successful? (y or n)")
            succ = raw_input()
            while not (succ == "y" or succ == "n"):
                print("Enter only y or n to indicate success of " + action)
                succ = raw_input()
            return succ
        else:
            return "no data queried"

    def get_int(self):
        """
        Returns
        -------
        int
        """
        if cfg.COLLECT_DATA and cfg.QUERY:
            print("How many legos are on the table?")
            inp = raw_input()
            while not inp.isdigit():
                print("Enter an integer.")
                inp = raw_input()
            return int(inp)
        else:
            return "no data queried"

    def get_str(self):
        """
        Returns
        -------
        :str: arbitrary value
        """
        if cfg.COLLECT_DATA and cfg.QUERY:
            print(
                "Any notes? (pushed legos off table, ran into table, etc.) (no if none)"
            )
            inp = raw_input()
            return inp
        else:
            return "no data queried"

    def run_singulation(self, col_img, main_mask, d_img, to_singulate):
        """
        Parameters
        ----------
        col_img : `ColorImage`
        main_mask : `BinaryImage`
        d_img : `DepthImage`
        to_singulate : list of `Group`
        """
        print("SINGULATING")

        self.dm.update_traj("action", "singulate")

        a = time.time()
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()
        self.dm.update_traj("compute_singulate_time", time.time() - a)

        singulator.display_singulation()

        self.dm.update_traj("singulate_info", (waypoints, rot, free_pix))
        self.dm.update_traj("singulate_successes", "crashed")
        self.dm.update_traj("execute_singulate_time", "crashed")
        self.dm.overwrite_traj()

        a = time.time()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)
        self.dm.update_traj("execute_singulate_time", time.time() - a)
        self.dm.update_traj("singulate_success",
                            self.get_success("singulation"))
        self.dm.overwrite_traj()
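
run_singulation above (and run_grasps below) write "crashed" placeholders into the trajectory log and persist them with overwrite_traj() before executing a motion, then overwrite them with real values afterwards, so a robot crash mid-motion still leaves an interpretable record. DataManager itself is not shown in this listing; the sketch below is a hypothetical minimal stand-in illustrating that write-ahead pattern, not the original implementation:

import cPickle as pickle

class MiniTrajLogger(object):
    """Hypothetical stand-in for DataManager: keeps one trajectory dict and
    rewrites it to disk on every overwrite, so the last state survives a crash."""

    def __init__(self, path='traj.p'):
        self.path = path
        self.traj = {}

    def update_traj(self, key, value):
        self.traj[key] = value

    def overwrite_traj(self):
        with open(self.path, 'wb') as f:
            pickle.dump(self.traj, f)

# Write-ahead usage, mirroring run_singulation:
# dm = MiniTrajLogger()
# dm.update_traj("singulate_successes", "crashed")   # pessimistic placeholder
# dm.overwrite_traj()                                # persisted before moving
# ... execute the motion ...
# dm.update_traj("singulate_successes", "y")         # replace on success
# dm.overwrite_traj()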

    def run_grasps(self, workspace_img, to_grasp):
        """
        Parameters
        ----------
        workspace_img : `ColorImage`
        to_grasp : list of tuple of:
            (`Group`, grasp_pose, suction_pose, class_num)
        """

        print("GRASPING")

        #impose ordering on grasps (by closest/highest y first)
        to_grasp.sort(key=lambda g: -1 * g[0].cm[0])

        self.dm.update_traj("action", "grasp")

        self.dm.update_traj("all_grasps_info",
                            [(c[0].cm, c[0].dir, c[0].mask.data, c[3])
                             for c in to_grasp])

        if not cfg.CHAIN_GRASPS:
            to_grasp = to_grasp[0:1]

        self.dm.update_traj("run_grasps_info",
                            [(c[0].cm, c[0].dir, c[0].mask.data, c[3])
                             for c in to_grasp])

        display_grasps(workspace_img, [g[0] for g in to_grasp])

        successes = ["not attempted" for i in range(len(to_grasp))]
        correct_colors = ["not attempted" for i in range(len(to_grasp))]
        times = ["not attempted" for i in range(len(to_grasp))]

        for i in range(len(to_grasp)):
            print "grasping", to_grasp[i][1]

            successes[i] = "crashed"
            correct_colors[i] = "crashed"
            times[i] = "crashed"
            self.dm.update_traj("grasp_successes", successes)
            self.dm.update_traj("grasp_colors", correct_colors)
            self.dm.update_traj("execute_grasp_times", times)
            self.dm.overwrite_traj()

            a = time.time()
            self.gm.execute_grasp(to_grasp[i][1], class_num=to_grasp[i][3])
            # self.gm.execute_suction(to_grasp[i][2], to_grasp[i][3])

            times[i] = time.time() - a
            successes[i] = self.get_success("grasp")
            correct_colors[i] = self.get_success("correct color")

            self.dm.update_traj("grasp_successes", successes)
            self.dm.update_traj("grasp_colors", correct_colors)
            self.dm.update_traj("execute_grasp_times", times)
            self.dm.overwrite_traj()
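
The sort at the top of run_grasps orders grasp candidates by the first component of each group's center of mass, largest first, which the original comment describes as "closest/highest y first". A small self-contained check of that key; the Group stand-in below is made up for illustration:

from collections import namedtuple

# Made-up stand-in for a `Group`: only the center-of-mass field matters here.
FakeGroup = namedtuple('FakeGroup', ['cm'])

to_grasp = [(FakeGroup(cm=(120, 40)), 'pose_a', 'suction_a', 0),
            (FakeGroup(cm=(310, 55)), 'pose_b', 'suction_b', 1),
            (FakeGroup(cm=(205, 70)), 'pose_c', 'suction_c', 2)]

# Same key as run_grasps: descending by cm[0].
to_grasp.sort(key=lambda g: -1 * g[0].cm[0])
print [g[1] for g in to_grasp]   # ['pose_b', 'pose_c', 'pose_a']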

    def clusters_to_actions(self, groups, col_img, d_img, workspace_img):
        """
        Parameters
        ----------
        groups : list of `Group`
        col_img : `ColorImage`
        d_img : `DepthImage`
        workspace_img : `ColorImage`

        Returns
        -------
        list of tuples of form:
            (`Group`, grasp_pose, suction_pose, class_num)
        list of `Group`
        """
        find_grasps_times = []
        compute_grasps_times = []
        find_hsv_times = []

        to_singulate = []
        to_grasp = []
        for group in groups:
            a = time.time()
            inner_groups = grasps_within_pile(col_img.mask_binary(group.mask))
            find_grasps_times.append(time.time() - a)

            if len(inner_groups) == 0:
                to_singulate.append(group)
            else:
                for in_group in inner_groups:
                    a = time.time()
                    pose, rot = self.gm.compute_grasp(in_group.cm,
                                                      in_group.dir, d_img)
                    grasp_pose = self.gripper.get_grasp_pose(
                        pose[0],
                        pose[1],
                        pose[2],
                        rot,
                        c_img=workspace_img.data)
                    suction_pose = self.suction.get_grasp_pose(
                        pose[0],
                        pose[1],
                        pose[2],
                        rot,
                        c_img=workspace_img.data)
                    compute_grasps_times.append(time.time() - a)

                    a = time.time()
                    class_num = hsv_classify(col_img.mask_binary(
                        in_group.mask))
                    find_hsv_times.append(time.time() - a)

                    to_grasp.append(
                        (in_group, grasp_pose, suction_pose, class_num))
        self.dm.update_traj("compute_grasps_times", compute_grasps_times)
        self.dm.update_traj("find_grasps_times", find_grasps_times)
        self.dm.update_traj("find_hsv_times", find_hsv_times)

        return to_grasp, to_singulate

    def lego_demo(self):
        if not cfg.COLLECT_DATA:
            print("WARNING: NO DATA IS BEING COLLECTED")
            print("TO COLLECT DATA, CHANGE COLLECT_DATA IN config_tpc")

        self.dm = DataManager()
        self.get_new_grasp = True

        # DEBUG = False
        # if not DEBUG:
        #     self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            print "\n new iteration"

            main_mask = crop_img(c_img, use_preset=True, arc=False)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            self.dm.clear_traj()
            self.dm.update_traj("c_img", c_img)
            self.dm.update_traj("d_img", d_img)
            self.dm.update_traj("stop_condition", "crash")
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            self.dm.update_traj("crop", workspace_img.data)

            a = time.time()
            groups = run_connected_components(workspace_img, viz=True)
            self.dm.update_traj("connected_components_time", time.time() - a)

            self.dm.update_traj("num_legos", self.get_int())

            self.dm.append_traj()

            print "num masses", len(groups)
            if len(groups) == 0:
                print("cleared workspace")
                self.dm.update_traj("stop_condition",
                                    self.get_success("clearing table"))
                self.dm.overwrite_traj()
                time.sleep(5)
                break

            to_grasp, to_singulate = self.clusters_to_actions(
                groups, col_img, d_img, workspace_img)

            self.whole_body.collision_world = self.collision_world
            if len(to_grasp) > 0:
                self.run_grasps(workspace_img, to_grasp)
            else:
                self.run_singulation(col_img, main_mask, d_img, to_singulate)

            self.dm.update_traj("notes", self.get_str())

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()

            time.sleep(8)  #making sure the robot is finished moving
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            self.dm.update_traj("stop_condition", "none")
            self.dm.update_traj("c_img_result", c_img)
            self.dm.update_traj("d_img_result", d_img)
            self.dm.overwrite_traj()
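
The listing ends inside lego_demo without a __main__ guard; elsewhere in this document the classes are driven by a small entry-point block, so presumably LegoDemo is launched the same way. A minimal sketch under that assumption:

if __name__ == "__main__":
    demo = LegoDemo()    # sets up robot interfaces and the collision world
    demo.lego_demo()     # loop: segment, grasp or singulate, log, repeat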