def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"
Example #2
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"
class LabelDemo():
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"

    def bbox_to_fg(self, bbox, c_img, col_img):
        obj_mask = crop_img(c_img,
                            bycoords=[bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL,
                                               ignore_black=True)
        return fg, obj_workspace_img

    def test_bbox_overlap(self, box_a, box_b):
        # bbox has format [xmin, ymin, xmax, ymax]
        if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
            return False
        if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
            return False
        return True

    def find_isolated_objects(self, bboxes, c_img):
        valid_bboxes = []
        for curr_ind in range(len(bboxes)):
            curr_bbox = bboxes[curr_ind]
            overlap = False
            for test_ind in range(curr_ind + 1, len(bboxes)):
                test_bbox = bboxes[test_ind]
                if self.test_bbox_overlap(curr_bbox, test_bbox):
                    overlap = True
                    break
            if not overlap:
                valid_bboxes.append(curr_bbox)
        return valid_bboxes

    def label_demo(self):
        self.gm.position_head()

        time.sleep(3)  # wait for the robot to finish moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  # make sure the new image is written before it is read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            obj = labels['objects'][0]
            bbox = obj['box']
            class_label = obj['class']

            # bbox has format [xmin, ymin, xmax, ymax]
            fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
            cv2.imwrite("debug_imgs/test.png", obj_w.data)

            groups = get_cluster_info(fg)
            display_grasps(workspace_img, groups)

            group = groups[0]
            pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
            grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                     pose[1],
                                                     pose[2],
                                                     rot,
                                                     c_img=workspace_img.data)

            self.gm.execute_grasp(grasp_pose, class_num=class_label)

            # reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
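
# A self-contained sketch of the separating-axis check that test_bbox_overlap
# implements above; the sample boxes below are hypothetical. Boxes use the
# format [xmin, ymin, xmax, ymax]: two boxes overlap unless one lies entirely
# to one side of the other along either axis.
def _bboxes_overlap(box_a, box_b):
    if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
        return False
    if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
        return False
    return True

assert _bboxes_overlap([0, 0, 10, 10], [5, 5, 15, 15])       # corner overlap
assert not _bboxes_overlap([0, 0, 10, 10], [11, 0, 20, 10])  # separated in x
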
class LabelDemo():

    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.br = tf.TransformBroadcaster()
        self.rgbd_map = RGBD2Map(self.br)
        # IPython.embed()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)
        self.grasp_count = 0
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options, self.robot.get('suction'))
        self.gm = GraspManipulator(self.gp, self.gripper, self.suction, self.whole_body, self.omni_base, self.tl)
        self.web = Web_Labeler()

        thread.start_new_thread(self.broadcast_temp_bin, ())

        time.sleep(3)
        print "after thread"

    def broadcast_temp_bin(self):
        while True:
            self.br.sendTransform((1, 1, 0.6),
                                  tf.transformations.quaternion_from_euler(ai=0.0, aj=1.57, ak=0.0),
                                  rospy.Time.now(),
                                  'temp_bin',
                                  # 'head_rgbd_sensor_link')
                                  'map')
            rospy.sleep(1)

    def bbox_to_fg(self, bbox, c_img, col_img):
        obj_mask = crop_img(c_img, bycoords=[bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL, ignore_black=True)
        return fg, obj_workspace_img

    def test_bbox_overlap(self, box_a, box_b):
        # bbox has format [xmin, ymin, xmax, ymax]
        if box_a[0] > box_b[2] or box_a[2] < box_b[0]:
            return False
        if box_a[1] > box_b[3] or box_a[3] < box_b[1]:
            return False 
        return True 

    def find_isolated_objects(self, bboxes, c_img):
        valid_bboxes = []
        for curr_ind in range(len(bboxes)):
            curr_bbox = bboxes[curr_ind]
            overlap = False  
            for test_ind in range(curr_ind + 1, len(bboxes)):
                test_bbox = bboxes[test_ind]
                if self.test_bbox_overlap(curr_bbox, test_bbox):
                    overlap = True 
                    break 
            if not overlap:
                valid_bboxes.append(curr_bbox)
        return valid_bboxes

    def label_demo(self):
        """ Main method which executes the stuff we're interested in.
        
        Should apply to both the physical and simulated HSRs. Call as `python
        main/test_labeling.py`.
        """
        self.gm.position_head()
        time.sleep(3)  # wait for the robot to finish moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        path = "/home/ron/siemens_sim/siemens_challenge/debug_imgs/web.png"
        cv2.imwrite(path, c_img)
        time.sleep(2)  # make sure the new image is written before it is read

        # print "\n new iteration"
        main_mask = crop_img(c_img, simple=True)
        col_img = ColorImage(c_img)
        workspace_img = col_img.mask_binary(main_mask)

        labels = self.web.label_image(path)

        obj = labels['objects'][0]
        bbox = obj['box']
        class_label = obj['class']

        # bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
        cv2.imwrite("debug_imgs/test.png", obj_w.data)

        groups = get_cluster_info(fg)
        display_grasps(workspace_img, groups)

        group = groups[0]
        print(d_img)
        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        pose = find_pose(pose)
        if pose is None:
            print("unable to find a corresponding item.")
            sys.exit()

        a = tf.transformations.quaternion_from_euler(ai=-2.355, aj=-3.14, ak=0.0)
        b = tf.transformations.quaternion_from_euler(ai=0.0, aj=0.0, ak=1.57)

        base_rot = tf.transformations.quaternion_multiply(a, b)

        print("now about to get grasp pose, w/pose: {}, rot: {}".format(pose, rot))
        thread.start_new_thread(self.gripper.loop_broadcast, (pose, base_rot, rot))
        time.sleep(5)
        print("now calling execute_grasp on frame 'grasp_0'")
        # IPython.embed()
        self.gm.execute_grasp("grasp_0")
        self.whole_body.move_end_effector_pose(geometry.pose(),"temp_bin")
        self.gripper.open_gripper()



        #reset to start
        self.whole_body.move_to_go()
        # self.gm.move_to_home()
        self.gm.position_head()
        time.sleep(3)
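
# A standalone sketch of the quaternion composition used in label_demo above:
# quaternion_multiply(a, b) returns the product a*b, composing the two Euler
# rotations into the single base orientation broadcast for the grasp frame.
# Requires the ROS `tf` package.
import tf

a = tf.transformations.quaternion_from_euler(ai=-2.355, aj=-3.14, ak=0.0)
b = tf.transformations.quaternion_from_euler(ai=0.0, aj=0.0, ak=1.57)
base_rot = tf.transformations.quaternion_multiply(a, b)
print(base_rot)  # a 4-element quaternion (x, y, z, w in ROS convention)
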
class DecisionDemo():
    def __init__(self):
        self.web = Web_Labeler()
        print "after thread"

    def run_grasp(self, bbox, c_img, col_img, workspace_img):
        print("grasping a " + cfg.labels[bbox.label])
        # bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = bbox.to_mask(c_img, col_img)
        # cv2.imwrite("debug_imgs/test.png", obj_w.data)
        # cv2.imwrite("debug_imgs/test2.png", fg.data)
        groups = get_cluster_info(fg)
        curr_tol = cfg.COLOR_TOL
        while len(groups) == 0 and curr_tol > 10:
            curr_tol -= 5
            # retry with a lower tolerance; probably a white object
            fg, obj_w = bbox.to_mask(c_img, col_img, tol=curr_tol)
            groups = get_cluster_info(fg)

        if len(groups) == 0:
            print("No object within bounding box")
            return False

        display_grasps(workspace_img, groups)

    def run_singulate(self, col_img, main_mask, to_singulate):
        print("singulating")
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()

        singulator.display_singulation()

    def decision_demo(self):
        time.sleep(3)  # wait for the robot to finish moving

        sample_data_paths = [
            "debug_imgs/data_chris/test" + str(i) + ".png" for i in range(3)
        ]
        img_ind = 0
        c_img = cv2.imread(sample_data_paths[img_ind])

        while not (c_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  # make sure the new image is written before it is read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            objs = labels['objects']
            bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
            single_objs = find_isolated_objects(bboxes)

            if len(single_objs) > 0:
                to_grasp = select_first_obj(single_objs)
                self.run_grasp(to_grasp, c_img, col_img, workspace_img)
            else:
                # for accurate singulation we should have bboxes for all objects
                fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                groups = merge_groups(groups, cfg.DIST_TOL)
                self.run_singulate(col_img, main_mask, groups)
            img_ind += 1
            if img_ind >= len(sample_data_paths):
                break
            c_img = cv2.imread(sample_data_paths[img_ind])
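
# A generic sketch of the retry pattern run_grasp uses above: when clustering
# finds nothing at the default color tolerance (common for white objects),
# lower the tolerance in steps until clusters appear or a floor is reached.
# `segment` is a hypothetical callable standing in for bbox.to_mask followed
# by get_cluster_info.
def segment_with_backoff(segment, start_tol, floor=10, step=5):
    tol = start_tol
    groups = segment(tol)
    while len(groups) == 0 and tol > floor:
        tol -= step
        groups = segment(tol)
    return groups, tol
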
class FullWebDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"

    def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img):
        print("grasping a " + cfg.labels[bbox.label])
        # bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = bbox.to_mask(c_img, col_img)
        # cv2.imwrite("debug_imgs/test.png", obj_w.data)
        # cv2.imwrite("debug_imgs/test2.png", fg.data)
        groups = get_cluster_info(fg)
        curr_tol = cfg.COLOR_TOL
        while len(groups) == 0 and curr_tol > 10:
            curr_tol -= 5
            # retry with a lower tolerance; probably a white object
            fg, obj_w = bbox.to_mask(c_img, col_img, tol=curr_tol)
            groups = get_cluster_info(fg)

        if len(groups) == 0:
            print("No object within bounding box")
            return False

        display_grasps(workspace_img, groups)

        group = groups[0]
        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                 pose[1],
                                                 pose[2],
                                                 rot,
                                                 c_img=workspace_img.data)

        self.gm.execute_grasp(grasp_pose, class_num=bbox.label)

    def run_singulate(self, col_img, main_mask, to_singulate, d_img):
        print("singulating")
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()

        singulator.display_singulation()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)

    def full_web_demo(self):
        self.gm.position_head()

        time.sleep(3)  # wait for the robot to finish moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  # make sure the new image is written before it is read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            objs = labels['objects']
            bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
            single_objs = find_isolated_objects(bboxes)

            if len(single_objs) > 0:
                to_grasp = select_first_obj(single_objs)
                self.run_grasp(to_grasp, c_img, col_img, workspace_img, d_img)
            else:
                # for accurate singulation we should have bboxes for all objects
                fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                groups = merge_groups(groups, cfg.DIST_TOL)
                self.run_singulate(col_img, main_mask, groups, d_img)

            # reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
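
# A skeleton of the sense/act/reset loop that full_web_demo and the other
# demos share, with the robot-specific calls abstracted away; `sense`, `act`,
# and `reset` are hypothetical placeholders for the camera reads, the
# grasp-or-singulate branch, and the move_to_go/position_head reset.
import time

def run_demo_loop(sense, act, reset, settle_time=3):
    c_img, d_img = sense()
    while not (c_img is None or d_img is None):
        act(c_img, d_img)
        reset()
        time.sleep(settle_time)  # let the robot finish moving
        c_img, d_img = sense()
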
Example #8
class SiemensDemo():
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"

    def run_grasp(self, bbox, c_img, col_img, workspace_img, d_img):
        print("grasping a " + cfg.labels[bbox.label])

        group = bbox.to_group(c_img, col_img)
        display_grasps(workspace_img, [group])

        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                 pose[1],
                                                 pose[2],
                                                 rot,
                                                 c_img=workspace_img.data)
        self.gm.execute_grasp(grasp_pose, class_num=bbox.label)

    def run_singulate(self, col_img, main_mask, to_singulate, d_img):
        print("singulating")
        singulator = Singulation(col_img, main_mask,
                                 [g.mask for g in to_singulate])
        waypoints, rot, free_pix = singulator.get_singulation()

        singulator.display_singulation()
        self.gm.singulate(waypoints, rot, col_img.data, d_img, expand=True)

    def get_bboxes_from_net(self, path):
        output_dict, vis_util_image = self.det.predict(path)
        plt.savefig('debug_imgs/predictions.png')
        plt.close()
        plt.clf()
        plt.cla()
        img = cv2.imread(path)
        boxes = format_net_bboxes(output_dict, img.shape)
        return boxes, vis_util_image

    def get_bboxes_from_web(self, path):
        labels = self.web.label_image(path)

        objs = labels['objects']
        bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
        return bboxes

    def determine_to_ask_for_help(self, bboxes, col_img):
        bboxes = deepcopy(bboxes)
        col_img = deepcopy(col_img)

        single_objs = find_isolated_objects_by_overlap(bboxes)

        if len(single_objs) > 0:
            return False
        else:
            isolated_exist = find_isolated_objects_by_distance(bboxes, col_img)
            return isolated_exist

    def get_bboxes(self, path, col_img):
        boxes, vis_util_image = self.get_bboxes_from_net(path)

        # low confidence or no objects
        if self.determine_to_ask_for_help(boxes, col_img):
            self.helper.asked = True
            self.helper.start_timer()
            boxes = self.get_bboxes_from_web(path)
            self.helper.stop_timer()
            self.dl.save_stat("duration", self.helper.duration)
            self.dl.save_stat("num_online", cfg.NUM_ROBOTS_ON_NETWORK)

        return boxes, vis_util_image

    def siemens_demo(self):
        self.gm.position_head()

        time.sleep(3)  # wait for the robot to finish moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  # make sure the new image is written before it is read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            bboxes, vis_util_image = self.get_bboxes(path, col_img)
            if len(bboxes) == 0:
                print("Cleared the workspace")
                print("Add more objects, then resume")
                IPython.embed()
            else:
                box_viz = draw_boxes(bboxes, c_img)
                cv2.imwrite("debug_imgs/box.png", box_viz)
                single_objs = find_isolated_objects_by_overlap(bboxes)
                grasp_success = 1.0
                if len(single_objs) == 0:
                    single_objs = find_isolated_objects_by_distance(
                        bboxes, col_img)

                if len(single_objs) > 0:
                    to_grasp = select_first_obj(single_objs)
                    singulation_time = 0.0
                    self.run_grasp(to_grasp, c_img, col_img, workspace_img,
                                   d_img)
                    grasp_success = self.dl.record_success(
                        "grasp", other_data=[c_img, vis_util_image, d_img])
                else:
                    # for accurate singulation we should have bboxes for all objects
                    fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                    groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                    groups = merge_groups(groups, cfg.DIST_TOL)
                    self.run_singulate(col_img, main_mask, groups, d_img)
                    sing_start = time.time()
                    singulation_success = self.dl.record_success(
                        "singulation",
                        other_data=[c_img, vis_util_image, d_img])
                    singulation_time = time.time() - sing_start

                if cfg.EVALUATE:
                    reward = self.helper.get_reward(grasp_success,
                                                    singulation_time)
                    self.dl.record_reward(reward)

            # reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
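
# A minimal entry-point sketch, assuming a ROS master is running and the
# project's dependencies are importable; SiemensDemo and siemens_demo are
# defined in the example above.
if __name__ == '__main__':
    demo = SiemensDemo()
    demo.siemens_demo()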