Code Example #1
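# Note: these examples are excerpted methods/classes; they assume roughly the
# following module-level imports (a sketch; exact module paths are assumptions):
#
#   import sys, time, thread   # Python 2 ('thread' became '_thread' in Python 3)
#   import numpy as np
#   import cv2
#   import rospy
#   import tf
#   import IPython
#   import hsrb_interface
#   from hsrb_interface import geometry
#   from tf import TransformListener
#
# Project-local helpers (RGBD, RGBD2Map, COM, GraspPlanner, Crane_Gripper,
# Suction_Gripper, GraspManipulator, Web_Labeler, TableTop, cfg, ...) come from
# the surrounding repository and are not shown here.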
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"
Code Example #2
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.wl = Python_Labeler(cam=self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ds = data_saver('tpc_rollouts/rollouts')

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body,
                                   self.omni_base, self.tt)

        print "after thread"
Code Example #3
    def __init__(self):
        """
        Class to run HSR lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base)

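        # register a box obstacle (presumably the table area) in the global
        # collision world so planned motions avoid it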
        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"
Code Example #4
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"
Code Example #5
class BedMaker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool 

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean over the nonzero entries of d_img
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def lego_demo(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if not DEBUG:
            self.position_head()
        b = time.time()
        while True:

            time.sleep(1)  #making sure the robot is finished moving

            a = time.time()
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            print "time to get images", time.time() - a
            print "\n new iteration"
            if c_img is not None and d_img is not None:

                c_ms, dirs, _ = run_connected_components(c_img)
                img = draw(c_img, c_ms, dirs)

                # # IPython.embed()
                for c_m, direction in zip(c_ms, dirs):
                    pose, rot = self.compute_grasp(c_m, direction, d_img)
                    rot -= pi / 2.0
                    print "pose, rot:", pose, rot

                ####DETERMINE WHAT OBJECT TO GRASP
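                # (pose/rot below come from the last connected component found above)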

                grasp_name = self.gripper.get_grasp_pose(pose[0],
                                                         pose[1],
                                                         pose[2],
                                                         rot,
                                                         c_img=c_img)

                self.execute_grasp(grasp_name)

                self.whole_body.move_to_go()
                self.position_head()

    def execute_grasp(self, grasp_name):
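        # open, move to the broadcast grasp frame, close, retreat 10 cm along the
        # frame's -z, then release over the 'head_down' frame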
        self.gripper.open_gripper()

        self.whole_body.end_effector_frame = 'hand_palm_link'

        self.whole_body.move_end_effector_pose(geometry.pose(), grasp_name)

        self.gripper.close_gripper()
        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.1),
                                               grasp_name)

        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.1),
                                               'head_down')

        self.gripper.open_gripper()

    def compute_grasp(self, c_m, direction, d_img):

        if direction:
            rot = 0.0
        else:
            rot = 1.57

        x = c_m[1]
        y = c_m[0]

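        # mean depth over a 40x40 pixel window centered on the grasp point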
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]

        z = self.gp.find_mean_depth(z_box)

        return [x, y, z], rot

    def singulate(self, start, end, c_img, d_img):
        # [355.9527559055119, 123.53543307086613, 977.26812500000005] 0.0
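        # gripper yaw: 90 degrees offset from the start->end push direction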
        rot = np.pi / 2 + np.arctan2(end[0] - start[0], end[1] - start[1])

        self.gripper.close_gripper()
        # self.go_to_point(start, rot, c_img, d_img)
        # self.go_to_point(end, rot, c_img, d_img)

        y, x = start
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        # above_start_pose_name = self.gripper.get_grasp_pose(x,y,z,rot,c_img=c_img)
        start_pose_name = self.gripper.get_grasp_pose(x,
                                                      y,
                                                      z,
                                                      rot,
                                                      c_img=c_img)

        y, x = end
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        end_pose_name = self.gripper.get_grasp_pose(x, y, z, rot, c_img=c_img)

        # raw_input("Click enter to move to " + above_start_pose_name)
        # self.whole_body.move_end_effector_pose(geometry.pose(), start_pose_name)
        # raw_input("Click enter to singulate from " + start_pose_name)
        print "singulating", start_pose_name
        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.05),
                                               start_pose_name)
        self.whole_body.move_end_effector_pose(geometry.pose(z=-.01),
                                               start_pose_name)
        # raw_input("Click enter to singulate to " + end_pose_name)
        print "singulating", end_pose_name
        self.whole_body.move_end_effector_pose(geometry.pose(z=-.01),
                                               end_pose_name)

        self.gripper.open_gripper()

    def go_to_point(self, point, rot, c_img, d_img):
        y, x = point
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        print "singulation pose:", x, y, z
        pose_name = self.gripper.get_grasp_pose(x, y, z, rot, c_img=c_img)
        raw_input("Click enter to move to " + pose_name)
        self.whole_body.move_end_effector_pose(geometry.pose(), pose_name)

    def position_head(self):

        self.tt.move_to_pose(self.omni_base, 'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
Code Example #6
class LabelDemo():
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"

    def bbox_to_fg(self, bbox, c_img, col_img):
        obj_mask = crop_img(c_img,
                            bycoords=[bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL,
                                               ignore_black=True)
        return fg, obj_workspace_img

    def test_bbox_overlap(self, box_a, box_b):
        #bbox has format [xmin, ymin, xmax, ymax]
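        # boxes are disjoint iff one lies entirely to the left/right of, or
        # entirely above/below, the other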
        if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
            return False
        if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
            return False
        return True

    def find_isolated_objects(self, bboxes, c_img):
        valid_bboxes = []
        for curr_ind in range(len(bboxes)):
            curr_bbox = bboxes[curr_ind]
            overlap = False
            # overlap is symmetric, so compare against every other box, not just later ones
            for test_ind in range(len(bboxes)):
                if test_ind == curr_ind:
                    continue
                test_bbox = bboxes[test_ind]
                if self.test_bbox_overlap(curr_bbox, test_bbox):
                    overlap = True
                    break
            if not overlap:
                valid_bboxes.append(curr_bbox)
        return valid_bboxes

    def label_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            obj = labels['objects'][0]
            bbox = obj['box']
            class_label = obj['class']

            #bbox has format [xmin, ymin, xmax, ymax]
            fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
            cv2.imwrite("debug_imgs/test.png", obj_w.data)

            groups = get_cluster_info(fg)
            display_grasps(workspace_img, groups)

            group = groups[0]
            pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
            grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                     pose[1],
                                                     pose[2],
                                                     rot,
                                                     c_img=workspace_img.data)

            self.gm.execute_grasp(grasp_pose, class_num=class_label)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
Code Example #7
class LabelDemo():

    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.br = tf.TransformBroadcaster()
        self.rgbd_map = RGBD2Map(self.br)
        # IPython.embed()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)
        self.grasp_count = 0
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))
        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)
        self.web = Web_Labeler()

        thread.start_new_thread(self.broadcast_temp_bin, ())

        time.sleep(3)
        print "after thread"

    def broadcast_temp_bin(self):
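        # publish a fixed 'temp_bin' drop-off frame in the map (at roughly 1 Hz)
        # so it can be used as a motion target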
        while True:
            self.br.sendTransform(
                (1, 1, 0.6),
                tf.transformations.quaternion_from_euler(ai=0.0, aj=1.57, ak=0.0),
                rospy.Time.now(),
                'temp_bin',
                # 'head_rgbd_sensor_link')
                'map')
            rospy.sleep(1)

    def bbox_to_fg(self, bbox, c_img, col_img):
        obj_mask = crop_img(c_img, bycoords=[bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL, ignore_black=True)
        return fg, obj_workspace_img

    def test_bbox_overlap(self, box_a, box_b):
        #bbox has format [xmin, ymin, xmax, ymax]
        if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
            return False
        if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
            return False 
        return True 

    def find_isolated_objects(self, bboxes, c_img):
        valid_bboxes = []
        for curr_ind in range(len(bboxes)):
            curr_bbox = bboxes[curr_ind]
            overlap = False  
            # overlap is symmetric, so compare against every other box, not just later ones
            for test_ind in range(len(bboxes)):
                if test_ind == curr_ind:
                    continue
                test_bbox = bboxes[test_ind]
                if self.test_bbox_overlap(curr_bbox, test_bbox):
                    overlap = True 
                    break 
            if not overlap:
                valid_bboxes.append(curr_bbox)
        return valid_bboxes

    def label_demo(self):
        """ Main method which executes the stuff we're interested in.
        
        Should apply to both the physical and simulated HSRs. Call as `python
        main/test_labeling.py`.
        """
        self.gm.position_head()
        time.sleep(3) #making sure the robot is finished moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        path = "/home/ron/siemens_sim/siemens_challenge/debug_imgs/web.png"
        cv2.imwrite(path, c_img)
        time.sleep(2) #make sure new image is written before being read

        # print "\n new iteration"
        main_mask = crop_img(c_img, simple=True)
        col_img = ColorImage(c_img)
        workspace_img = col_img.mask_binary(main_mask)

        labels = self.web.label_image(path)

        obj = labels['objects'][0]
        bbox = obj['box']
        class_label = obj['class']

        #bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
        cv2.imwrite("debug_imgs/test.png", obj_w.data)

        groups = get_cluster_info(fg)
        display_grasps(workspace_img, groups)

        group = groups[0]
        print(d_img)
        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        pose = find_pose(pose)
        if pose is None:
            print("unable to find corresponding item.")
            sys.exit()

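        # compose two fixed rotations (a roll/pitch offset and a 90-degree yaw)
        # into the gripper's base orientation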
        a = tf.transformations.quaternion_from_euler(ai=-2.355, aj=-3.14, ak=0.0)
        b = tf.transformations.quaternion_from_euler(ai=0.0, aj=0.0, ak=1.57)

        base_rot = tf.transformations.quaternion_multiply(a, b)

        print("now about to get grasp pose, w/pose: {}, rot: {}".format(pose, rot))
        thread.start_new_thread(self.gripper.loop_broadcast, (pose, base_rot, rot))
        time.sleep(5)
        print("now calling execute_grasp w/grasp_pose: {}".format(grasp_pose))
        # IPython.embed()
        self.gm.execute_grasp("grasp_0")
        self.whole_body.move_end_effector_pose(geometry.pose(),"temp_bin")
        self.gripper.open_gripper()



        #reset to start
        self.whole_body.move_to_go()
        # self.gm.move_to_home()
        self.gm.position_head()
        time.sleep(3)
Code Example #8
class LegoDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"

    def find_mean_depth(self, d_img):
        """
        Evaluates the current policy and then executes the motion
        specified in the the common class
        """

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return

    def get_success(self, action):
        """
        Parameters
        ----------
        action : str
            action to query success of
        Returns
        -------
        :str: y or n
        """
        if cfg.COLLECT_DATA and cfg.QUERY:
            print("Was " + action + " successful? (y or n)")
            succ = raw_input()
            while not (succ == "y" or succ == "n"):
                print("Enter only y or n to indicate success of " + action)
                succ = raw_input()
            return succ
        else:
            return "no data queried"

    def get_int(self):
        """
        Returns
        -------
        :str: digit string giving the number of legos on the table
        """
        if cfg.COLLECT_DATA and cfg.QUERY:
            print("How many legos are on the table?")
            inp = raw_input()
            while not inp.isdigit():
                print("Enter an integer.")
                inp = raw_input()
            return inp
        else:
            return "no data queried"

    def get_str(self):
        """
        Returns
        -------
        :str: arbitrary value
        """
        if cfg.COLLECT_DATA and cfg.QUERY:
            print(
                "Any notes? (pushed legos off table, ran into table, etc.) (no if none)"
            )
            inp = raw_input()
            return inp
        else:
            return "no data queried"

    def run_singulation(self, col_img, main_mask, d_img, to_singulate):
        """
        Parameters
        ----------
        col_img : `ColorImage`
        main_mask : `BinaryImage`
        d_img : `DepthImage`
        to_singulate : list of `Group`
        """
        print("SINGULATING")

        self.dm.update_traj("action", "singulate")

        a = time.time()
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()
        self.dm.update_traj("compute_singulate_time", time.time() - a)

        singulator.display_singulation()

        self.dm.update_traj("singulate_info", (waypoints, rot, free_pix))
        self.dm.update_traj("singulate_successes", "crashed")
        self.dm.update_traj("execute_singulate_time", "crashed")
        self.dm.overwrite_traj()

        a = time.time()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)
        self.dm.update_traj("execute_singulate_time", time.time() - a)
        self.dm.update_traj("singulate_success",
                            self.get_success("singulation"))
        self.dm.overwrite_traj()

    def run_grasps(self, workspace_img, to_grasp):
        """
        Parameters
        ----------
        workspace_img : `ColorImage`
        to_grasp : list of tuple of:
            (`Group`, grasp_pose, suction_pose, class_num)
        """

        print("GRASPING")

        #impose ordering on grasps (by closest/highest y first)
        to_grasp.sort(key=lambda g: -1 * g[0].cm[0])

        self.dm.update_traj("action", "grasp")

        self.dm.update_traj("all_grasps_info",
                            [(c[0].cm, c[0].dir, c[0].mask.data, c[3])
                             for c in to_grasp])

        if not cfg.CHAIN_GRASPS:
            to_grasp = to_grasp[0:1]

        self.dm.update_traj("run_grasps_info",
                            [(c[0].cm, c[0].dir, c[0].mask.data, c[3])
                             for c in to_grasp])

        display_grasps(workspace_img, [g[0] for g in to_grasp])

        successes = ["not attempted" for i in range(len(to_grasp))]
        correct_colors = ["not attempted" for i in range(len(to_grasp))]
        times = ["not attempted" for i in range(len(to_grasp))]

        for i in range(len(to_grasp)):
            print "grasping", to_grasp[i][1]

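            # pre-record "crashed" for this grasp; overwritten with real results
            # once execution finishes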
            successes[i] = "crashed"
            correct_colors[i] = "crashed"
            times[i] = "crashed"
            self.dm.update_traj("grasp_successes", successes)
            self.dm.update_traj("grasp_colors", correct_colors)
            self.dm.update_traj("execute_grasp_times", times)
            self.dm.overwrite_traj()

            a = time.time()
            self.gm.execute_grasp(to_grasp[i][1], class_num=to_grasp[i][3])
            # self.gm.execute_suction(to_grasp[i][2], to_grasp[i][3])

            times[i] = time.time() - a
            successes[i] = self.get_success("grasp")
            correct_colors[i] = self.get_success("correct color")

            self.dm.update_traj("grasp_successes", successes)
            self.dm.update_traj("grasp_colors", correct_colors)
            self.dm.update_traj("execute_grasp_times", times)
            self.dm.overwrite_traj()

    def clusters_to_actions(self, groups, col_img, d_img, workspace_img):
        """
        Parameters
        ----------
        groups : list of `Group`
        col_img : `ColorImage`
        d_img : `DepthImage`
        workspace_img : `ColorImage`

        Returns
        -------
        list of tuples of form:
            (`Group`, grasp_pose, suction_pose, class_num)
        list of `Group`
        """
        find_grasps_times = []
        compute_grasps_times = []
        find_hsv_times = []

        to_singulate = []
        to_grasp = []
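        # piles that yield no grasp candidates are queued for singulation instead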
        for group in groups:
            a = time.time()
            inner_groups = grasps_within_pile(col_img.mask_binary(group.mask))
            find_grasps_times.append(time.time() - a)

            if len(inner_groups) == 0:
                to_singulate.append(group)
            else:
                for in_group in inner_groups:
                    a = time.time()
                    pose, rot = self.gm.compute_grasp(in_group.cm,
                                                      in_group.dir, d_img)
                    grasp_pose = self.gripper.get_grasp_pose(
                        pose[0],
                        pose[1],
                        pose[2],
                        rot,
                        c_img=workspace_img.data)
                    suction_pose = self.suction.get_grasp_pose(
                        pose[0],
                        pose[1],
                        pose[2],
                        rot,
                        c_img=workspace_img.data)
                    compute_grasps_times.append(time.time() - a)

                    a = time.time()
                    class_num = hsv_classify(col_img.mask_binary(
                        in_group.mask))
                    find_hsv_times.append(time.time() - a)

                    to_grasp.append(
                        (in_group, grasp_pose, suction_pose, class_num))
        self.dm.update_traj("compute_grasps_times", compute_grasps_times)
        self.dm.update_traj("find_grasps_times", find_grasps_times)
        self.dm.update_traj("find_hsv_times", find_hsv_times)

        return to_grasp, to_singulate

    def lego_demo(self):
        if not cfg.COLLECT_DATA:
            print("WARNING: NO DATA IS BEING COLLECTED")
            print("TO COLLECT DATA, CHANGE COLLECT_DATA IN config_tpc")

        self.dm = DataManager()
        self.get_new_grasp = True

        # DEBUG = False
        # if not DEBUG:
        #     self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            print "\n new iteration"

            main_mask = crop_img(c_img, use_preset=True, arc=False)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            self.dm.clear_traj()
            self.dm.update_traj("c_img", c_img)
            self.dm.update_traj("d_img", d_img)
            self.dm.update_traj("stop_condition", "crash")
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            self.dm.update_traj("crop", workspace_img.data)

            a = time.time()
            groups = run_connected_components(workspace_img, viz=True)
            self.dm.update_traj("connected_components_time", time.time() - a)

            self.dm.update_traj("num_legos", self.get_int())

            self.dm.append_traj()

            print "num masses", len(groups)
            if len(groups) == 0:
                print("cleared workspace")
                self.dm.update_traj("stop_condition",
                                    self.get_success("clearing table"))
                self.dm.overwrite_traj()
                time.sleep(5)
                break

            to_grasp, to_singulate = self.clusters_to_actions(
                groups, col_img, d_img, workspace_img)

            self.whole_body.collision_world = self.collision_world
            if len(to_grasp) > 0:
                self.run_grasps(workspace_img, to_grasp)
            else:
                self.run_singulation(col_img, main_mask, d_img, to_singulate)

            self.dm.update_traj("notes", self.get_str())

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()

            time.sleep(8)  #making sure the robot is finished moving
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            self.dm.update_traj("stop_condition", "none")
            self.dm.update_traj("c_img_result", c_img)
            self.dm.update_traj("d_img_result", d_img)
            self.dm.overwrite_traj()
Code Example #9
    cam = RGBD()
    com = COM()

    com.go_to_initial_state(whole_body)

    tt = TableTop()
    tt.find_table(robot)

    grasp_count = 0

    br = tf.TransformBroadcaster()
    tl = TransformListener()

    gp = GraspPlanner()
    gripper = Crane_Gripper(gp, cam, com.Options, robot.get('gripper'))
    suction = Suction_Gripper(gp, cam, com.Options, robot.get('suction'))
    gm = GraspManipulator(gp, gripper, suction, whole_body, omni_base, tt)
    gm.position_head()

    print "after thread"
    curr_offsets = np.array([0, 0, -0.5])
    curr_rot = np.array([0.0,0.0,1.57])

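    # interactively create a named pose near the table and nudge it with +/- axis commands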
    while True:
        label = id_generator()
        tt.make_new_pose(curr_offsets, label, rot=curr_rot)
        whole_body.move_end_effector_pose(geometry.pose(z=-0.1), label)
        delta = raw_input()
        while delta not in ["+x", "-x", "+y", "-y", "+z", "-z"]:
            print("not a valid delta")
            delta = raw_input()
Code Example #10
class BedMaker():
    def __init__(self):
        '''
        Initialization class for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.wl = Python_Labeler(cam=self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ds = data_saver('tpc_rollouts/rollouts')

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body,
                                   self.omni_base, self.tt)

        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Computes the mean over the nonzero entries of d_img
        '''

        indx = np.nonzero(d_img)

        mean = np.mean(d_img[indx])

        return mean

    def lego_demo(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if not DEBUG:
            self.gm.position_head()

        while True:
            time.sleep(1)  #making sure the robot is finished moving
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            print "\n new iteration"

            if c_img is not None and d_img is not None:
                #label image
                data = self.wl.label_image(c_img)
                c_img = self.cam.read_color_data()

                grasp_boxes = []
                suction_boxes = []
                singulate_boxes = []

                for i in range(data['num_labels']):
                    bbox = data['objects'][i]['box']
                    classnum = data['objects'][i]['class']
                    classname = ['grasp', 'singulate', 'suction',
                                 'quit'][classnum]
                    if classname == "grasp":
                        grasp_boxes.append(bbox)
                    elif classname == "suction":
                        suction_boxes.append(bbox)
                    elif classname == "singulate":
                        singulate_boxes.append(bbox)
                    elif classname == "quit":
                        self.ds.save_rollout()
                        self.gm.move_to_home()
                self.ds.append_data_to_rollout(c_img, data)
                main_mask = crop_img(c_img)
                col_img = ColorImage(c_img)
                workspace_img = col_img.mask_binary(main_mask)

                grasps = []
                viz_info = []
                for i in range(len(grasp_boxes)):
                    bbox = grasp_boxes[i]
                    center_mass, direction = bbox_to_grasp(bbox, c_img, d_img)

                    viz_info.append([center_mass, direction])
                    pose, rot = self.gm.compute_grasp(center_mass, direction,
                                                      d_img)
                    grasps.append(
                        self.gripper.get_grasp_pose(pose[0],
                                                    pose[1],
                                                    pose[2],
                                                    rot,
                                                    c_img=workspace_img.data))

                suctions = []
                for i in range(len(suction_boxes)):
                    suctions.append("compute_suction?")

                if len(grasps) > 0 or len(suctions) > 0:
                    cv2.imwrite(
                        "grasps.png",
                        visualize(workspace_img, [v[0] for v in viz_info],
                                  [v[1] for v in viz_info]))
                    print "running grasps"

                    for grasp in grasps:
                        print "grasping", grasp
                        self.gm.execute_grasp(grasp)
                    print "running suctions"
                    for suction in suctions:
                        print "suctioning", suction
                        #execute suction
                elif len(singulate_boxes) > 0:
                    print("singulating")
                    bbox = singulate_boxes[0]
                    obj_mask = bbox_to_mask(bbox, c_img)
                    start, end = find_singulation(col_img, main_mask, obj_mask)
                    display_singulation(start, end, workspace_img)

                    self.gm.singulate(start, end, c_img, d_img)
                else:
                    print("cleared workspace")

                self.whole_body.move_to_go()
                self.gm.position_head()
Code Example #11
class FullWebDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"

    def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img):
        print("grasping a " + cfg.labels[bbox.label])
        #bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = bbox.to_mask(c_img, col_img)
        # cv2.imwrite("debug_imgs/test.png", obj_w.data)
        # cv2.imwrite("debug_imgs/test2.png", fg.data)
        groups = get_cluster_info(fg)
        curr_tol = cfg.COLOR_TOL
        while len(groups) == 0 and curr_tol > 10:
            curr_tol -= 5
            #retry with lower tolerance- probably white object
            fg, obj_w = bbox.to_mask(c_img, col_img, tol=curr_tol)
            groups = get_cluster_info(fg)

        if len(groups) == 0:
            print("No object within bounding box")
            return False

        display_grasps(workspace_img, groups)

        group = groups[0]
        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                 pose[1],
                                                 pose[2],
                                                 rot,
                                                 c_img=workspace_img.data)

        self.gm.execute_grasp(grasp_pose, class_num=bbox.label)

    def run_singulate(self, col_img, main_mask, to_singulate, d_img):
        print("singulating")
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()

        singulator.display_singulation()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)

    def full_web_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            objs = labels['objects']
            bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
            single_objs = find_isolated_objects(bboxes)

            if len(single_objs) > 0:
                to_grasp = select_first_obj(single_objs)
                self.run_grasp(to_grasp, c_img, col_img, workspace_img, d_img)
            else:
                #for accurate singulation should have bboxes for all
                fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                groups = merge_groups(groups, cfg.DIST_TOL)
                self.run_singulate(col_img, main_mask, groups, d_img)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
Code Example #12
class SiemensDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"

    def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img):
        print("grasping a " + cfg.labels[bbox.label])

        group = bbox.to_group(c_img, col_img)
        display_grasps(workspace_img, [group])

        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                 pose[1],
                                                 pose[2],
                                                 rot,
                                                 c_img=workspace_img.data)
        self.gm.execute_grasp(grasp_pose, class_num=bbox.label)

    def run_singulate(self, col_img, main_mask, to_singulate, d_img):
        print("singulating")
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()

        singulator.display_singulation()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)

    def get_bboxes_from_net(self, path):
        output_dict, vis_util_image = self.det.predict(path)
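        # predict() presumably renders its visualization into the current
        # matplotlib figure; save it, then clear all figure state before reuse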
        plt.savefig('debug_imgs/predictions.png')
        plt.close()
        plt.clf()
        plt.cla()
        img = cv2.imread(path)
        boxes = format_net_bboxes(output_dict, img.shape)
        return boxes, vis_util_image

    def get_bboxes_from_web(self, path):
        labels = self.web.label_image(path)

        objs = labels['objects']
        bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
        return bboxes

    def determine_to_ask_for_help(self, bboxes, col_img):
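        # ask a human for labels only when overlap-based isolation finds nothing
        # but distance-based isolation suggests isolated objects still exist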
        bboxes = deepcopy(bboxes)
        col_img = deepcopy(col_img)

        single_objs = find_isolated_objects_by_overlap(bboxes)

        if len(single_objs) > 0:
            return False
        else:
            isolated_exist = find_isolated_objects_by_distance(bboxes, col_img)
            return isolated_exist

    def get_bboxes(self, path, col_img):
        boxes, vis_util_image = self.get_bboxes_from_net(path)

        #low confidence or no objects
        if self.determine_to_ask_for_help(boxes, col_img):
            self.helper.asked = True
            self.helper.start_timer()
            boxes = self.get_bboxes_from_web(path)
            self.helper.stop_timer()
            self.dl.save_stat("duration", self.helper.duration)
            self.dl.save_stat("num_online", cfg.NUM_ROBOTS_ON_NETWORK)

        return boxes, vis_util_image

    def siemens_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            bboxes, vis_util_image = self.get_bboxes(path, col_img)
            if len(bboxes) == 0:
                print("Cleared the workspace")
                print("Add more objects, then resume")
                IPython.embed()
            else:
                box_viz = draw_boxes(bboxes, c_img)
                cv2.imwrite("debug_imgs/box.png", box_viz)
                single_objs = find_isolated_objects_by_overlap(bboxes)
                grasp_success = 1.0
                if len(single_objs) == 0:
                    single_objs = find_isolated_objects_by_distance(
                        bboxes, col_img)

                if len(single_objs) > 0:
                    to_grasp = select_first_obj(single_objs)
                    singulation_time = 0.0
                    self.run_grasp(to_grasp, c_img, col_img, workspace_img,
                                   d_img)
                    grasp_success = self.dl.record_success(
                        "grasp", other_data=[c_img, vis_util_image, d_img])
                else:
                    #for accurate singulation should have bboxes for all
                    fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                    groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                    groups = merge_groups(groups, cfg.DIST_TOL)
                    self.run_singulate(col_img, main_mask, groups, d_img)
                    sing_start = time.time()
                    singulation_success = self.dl.record_success(
                        "singulation",
                        other_data=[c_img, vis_util_image, d_img])
                    singulation_time = time.time() - sing_start

                if cfg.EVALUATE:
                    reward = self.helper.get_reward(grasp_success,
                                                    singulation_time)
                    self.dl.record_reward(reward)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()