Example #1
    def to_mask(self, c_img, col_img, tol=cfg.COLOR_TOL):
        obj_mask = crop_img(
            c_img, bycoords=[self.ymin, self.ymax, self.xmin, self.xmax])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(tol, ignore_black=True)
        return fg, obj_workspace_img

    def bbox_to_fg(self, bbox, c_img, col_img):
        # bbox has format [xmin, ymin, xmax, ymax]; crop_img expects
        # [ymin, ymax, xmin, xmax], hence the index reordering below
        obj_mask = crop_img(c_img,
                            bycoords=[bbox[1], bbox[3], bbox[0], bbox[2]])
        obj_workspace_img = col_img.mask_binary(obj_mask)
        fg = obj_workspace_img.foreground_mask(cfg.COLOR_TOL,
                                               ignore_black=True)
        return fg, obj_workspace_img
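
# A minimal, self-contained sketch (plain numpy, no robot stack) of the
# coordinate handling above: detector boxes arrive as [xmin, ymin, xmax, ymax],
# while crop_img takes bycoords=[ymin, ymax, xmin, xmax], hence the
# reordering in bbox_to_fg. The helper names here are hypothetical.
import numpy as np

def bbox_to_crop_coords(bbox):
    """Reorder [xmin, ymin, xmax, ymax] into [ymin, ymax, xmin, xmax]."""
    xmin, ymin, xmax, ymax = bbox
    return [ymin, ymax, xmin, xmax]

def mask_to_bbox_region(img, bbox):
    """Zero out everything outside bbox (a stand-in for crop_img + mask_binary)."""
    ymin, ymax, xmin, xmax = bbox_to_crop_coords(bbox)
    out = np.zeros_like(img)
    out[ymin:ymax, xmin:xmax] = img[ymin:ymax, xmin:xmax]
    return out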
    def label_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            obj = labels['objects'][0]
            bbox = obj['box']
            class_label = obj['class']

            #bbox has format [xmin, ymin, xmax, ymax]
            fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
            cv2.imwrite("debug_imgs/test.png", obj_w.data)

            groups = get_cluster_info(fg)
            display_grasps(workspace_img, groups)

            group = groups[0]
            pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
            grasp_pose = self.gripper.get_grasp_pose(pose[0],
                                                     pose[1],
                                                     pose[2],
                                                     rot,
                                                     c_img=workspace_img.data)

            self.gm.execute_grasp(grasp_pose, class_num=class_label)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
    def full_web_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            objs = labels['objects']
            bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
            single_objs = find_isolated_objects(bboxes)

            if len(single_objs) > 0:
                to_grasp = select_first_obj(single_objs)
                self.run_grasp(to_grasp, c_img, col_img, workspace_img, d_img)
            else:
                #for accurate singulation should have bboxes for all
                fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                groups = merge_groups(groups, cfg.DIST_TOL)
                self.run_singulate(col_img, main_mask, groups, d_img)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
    def decision_demo(self):
        time.sleep(3)  #making sure the robot is finished moving

        sample_data_paths = [
            "debug_imgs/data_chris/test" + str(i) + ".png" for i in range(3)
        ]
        img_ind = 0
        c_img = cv2.imread(sample_data_paths[img_ind])

        while c_img is not None:
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            labels = self.web.label_image(path)

            objs = labels['objects']
            bboxes = [Bbox(obj['box'], obj['class']) for obj in objs]
            single_objs = find_isolated_objects(bboxes)

            if len(single_objs) > 0:
                to_grasp = select_first_obj(single_objs)
                self.run_grasp(to_grasp, c_img, col_img, workspace_img)
            else:
                #for accurate singulation should have bboxes for all
                fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                groups = merge_groups(groups, cfg.DIST_TOL)
                self.run_singulate(col_img, main_mask, groups)
            img_ind += 1
            if img_ind >= len(sample_data_paths):
                break
            c_img = cv2.imread(sample_data_paths[img_ind])
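
# The decision branch above keys on find_isolated_objects. A self-contained
# sketch of one way such a check can work (overlap-based, with boxes as
# [xmin, ymin, xmax, ymax]); this is an illustration, not the repo's
# implementation.
def boxes_overlap(a, b):
    """Axis-aligned overlap test for [xmin, ymin, xmax, ymax] boxes."""
    return not (a[2] <= b[0] or b[2] <= a[0] or a[3] <= b[1] or b[3] <= a[1])

def find_isolated(boxes):
    """Boxes overlapping no other box: candidates for a direct grasp."""
    return [b for i, b in enumerate(boxes)
            if not any(i != j and boxes_overlap(b, o)
                       for j, o in enumerate(boxes))]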
Example #6
import cv2

from tpc.perception.cluster_registration import run_connected_components, \
    grasps_within_pile
from tpc.perception.groups import Group
from tpc.perception.singulation import Singulation
from tpc.perception.crop import crop_img

from tpc.data_manager import DataManager

import tpc.config.config_tpc as cfg
import importlib
img = importlib.import_module(cfg.IMG_MODULE)
ColorImage = getattr(img, 'ColorImage')
BinaryImage = getattr(img, 'BinaryImage')

if __name__ == "__main__":
    c_img = cv2.imread("debug_imgs/singulate_fails/0/orig.png")

    main_mask = crop_img(c_img, use_preset=True)
    col_img = ColorImage(c_img)
    workspace_img = col_img.mask_binary(main_mask)

    groups = run_connected_components(workspace_img, viz=False)

    to_singulate = []
    to_grasp = []
    for group in groups:
        inner_groups = grasps_within_pile(col_img.mask_binary(group.mask))

        if len(inner_groups) == 0:
            to_singulate.append(group)
        else:
            for in_group in inner_groups:
                pose, rot = None, None  # placeholder: grasp computation elided in this snippet
    def label_demo(self):
        """Run the labeling demo end to end.

        Should apply to both the physical and simulated HSRs. Run as
        `python main/test_labeling.py`.
        """
        self.gm.position_head()
        time.sleep(3) #making sure the robot is finished moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        path = "/home/ron/siemens_sim/siemens_challenge/debug_imgs/web.png"
        cv2.imwrite(path, c_img)
        time.sleep(2) #make sure new image is written before being read

        # print "\n new iteration"
        main_mask = crop_img(c_img, simple=True)
        col_img = ColorImage(c_img)
        workspace_img = col_img.mask_binary(main_mask)

        labels = self.web.label_image(path)

        obj = labels['objects'][0]
        bbox = obj['box']
        class_label = obj['class']

        #bbox has format [xmin, ymin, xmax, ymax]
        fg, obj_w = self.bbox_to_fg(bbox, c_img, col_img)
        cv2.imwrite("debug_imgs/test.png", obj_w.data)

        groups = get_cluster_info(fg)
        display_grasps(workspace_img, groups)

        group = groups[0]
        print(d_img)
        pose, rot = self.gm.compute_grasp(group.cm, group.dir, d_img)
        pose = find_pose(pose)
        if pose is None:
            print("unable to find corresponding item.")
            sys.exit()

        a = tf.transformations.quaternion_from_euler(ai=-2.355, aj=-3.14, ak=0.0)
        b = tf.transformations.quaternion_from_euler(ai=0.0, aj=0.0, ak=1.57)

        base_rot = tf.transformations.quaternion_multiply(a, b)

        print("now about to get grasp pose, w/pose: {}, rot: {}".format(pose, rot))
        thread.start_new_thread(self.gripper.loop_broadcast, (pose, base_rot, rot))
        time.sleep(5)
        print("now calling execute_grasp w/grasp_pose: {}".format(grasp_pose))
        # IPython.embed()
        self.gm.execute_grasp("grasp_0")
        self.whole_body.move_end_effector_pose(geometry.pose(),"temp_bin")
        self.gripper.open_gripper()



        #reset to start
        self.whole_body.move_to_go()
        # self.gm.move_to_home()
        self.gm.position_head()
        time.sleep(3)
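
# The two quaternion_from_euler calls in label_demo compose a fixed gripper
# orientation with a yaw offset. A standalone sketch of the same composition,
# using scipy as a stand-in for tf.transformations (same angles; scipy's
# lowercase "xyz" matches tf's default static-axes convention):
from scipy.spatial.transform import Rotation as R

a = R.from_euler("xyz", [-2.355, -3.14, 0.0])  # fixed gripper orientation
b = R.from_euler("xyz", [0.0, 0.0, 1.57])      # yaw offset
base_rot = a * b  # applies b first, then a, like quaternion_multiply(a, b)
print(base_rot.as_quat())  # [x, y, z, w], the same layout tf uses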
Example #8
    def lego_demo(self):
        if not cfg.COLLECT_DATA:
            print("WARNING: NO DATA IS BEING COLLECTED")
            print("TO COLLECT DATA, CHANGE COLLECT_DATA IN config_tpc")

        self.dm = DataManager()
        self.get_new_grasp = True

        # DEBUG = False
        # if not DEBUG:
        #     self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            print "\n new iteration"

            main_mask = crop_img(c_img, use_preset=True, arc=False)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            self.dm.clear_traj()
            self.dm.update_traj("c_img", c_img)
            self.dm.update_traj("d_img", d_img)
            self.dm.update_traj("stop_condition", "crash")
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            self.dm.update_traj("crop", workspace_img.data)

            a = time.time()
            groups = run_connected_components(workspace_img, viz=True)
            self.dm.update_traj("connected_components_time", time.time() - a)

            self.dm.update_traj("num_legos", self.get_int())

            self.dm.append_traj()

            print "num masses", len(groups)
            if len(groups) == 0:
                print("cleared workspace")
                self.dm.update_traj("stop_condition",
                                    self.get_success("clearing table"))
                self.dm.overwrite_traj()
                time.sleep(5)
                break

            to_grasp, to_singulate = self.clusters_to_actions(
                groups, col_img, d_img, workspace_img)

            self.whole_body.collision_world = self.collision_world
            if len(to_grasp) > 0:
                self.run_grasps(workspace_img, to_grasp)
            else:
                self.run_singulation(col_img, main_mask, d_img, to_singulate)

            self.dm.update_traj("notes", self.get_str())

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()

            time.sleep(8)  #making sure the robot is finished moving
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()

            self.dm.update_traj("stop_condition", "none")
            self.dm.update_traj("c_img_result", c_img)
            self.dm.update_traj("d_img_result", d_img)
            self.dm.overwrite_traj()
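
# lego_demo above logs each iteration through four DataManager calls. A
# minimal, hypothetical stand-in that matches that call pattern (the real
# tpc.data_manager may persist to disk and differ in detail):
class MiniDataManager(object):
    """Dict-backed stand-in for the clear/update/append/overwrite pattern."""

    def __init__(self):
        self.rollout = []  # one dict of logged values per demo iteration
        self.curr = {}

    def clear_traj(self):
        self.curr = {}

    def update_traj(self, key, value):
        self.curr[key] = value

    def append_traj(self):
        self.rollout.append(dict(self.curr))

    def overwrite_traj(self):
        # replace the last appended entry with the updated values
        if self.rollout:
            self.rollout[-1] = dict(self.curr)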
from tpc.perception.cluster_registration import run_connected_components, display_grasps, \
    grasps_within_pile, hsv_classify
from tpc.perception.groups import Group

from tpc.perception.singulation import Singulation
from tpc.perception.crop import crop_img
from tpc.manipulation.primitives import GraspManipulator
from tpc.data_manager import DataManager
from il_ros_hsr.p_pi.bed_making.table_top import TableTop
from il_ros_hsr.core.rgbd_to_map import RGBD2Map

import cv2

import tpc.config.config_tpc as cfg
import importlib

img = importlib.import_module(cfg.IMG_MODULE)
ColorImage = getattr(img, 'ColorImage')
BinaryImage = getattr(img, 'BinaryImage')

if __name__ == "__main__":
    for i in range(0, 4):
        path = "debug_imgs/data_chris/test" + str(i)
        curr_img = cv2.imread(path + ".png")
        main_mask = crop_img(curr_img, simple=True)
        col_img = ColorImage(curr_img)
        workspace_img = col_img.mask_binary(main_mask)
        cv2.imwrite(path + "crop.png", workspace_img.data)
        fg = workspace_img.foreground_mask(cfg.COLOR_TOL, ignore_black=True)
        cv2.imwrite(path + "mask.png", fg.data)
        groups = run_connected_components(workspace_img, viz=False)
        display_grasps(workspace_img, groups, name=path + "piles.png")
Example #10
import numpy as np
import cv2
import IPython
from perception import ColorImage, BinaryImage
from tpc.perception.crop import crop_img

"""
Script to run crop on sample images
Can be used to fine tune crop if necessary
"""

if __name__ == "__main__":
    get_img = lambda ind: cv2.imread("debug_imgs/new_setup_crop/img" + str(ind) + ".png")
    write_img = lambda img, ind: cv2.imwrite("debug_imgs/new_setup_crop/img_o" + str(ind) + ".png", img)
    for i in range(11, 12):
        img = get_img(i)
        crop_mask = crop_img(img, use_preset=True)
        viz = ColorImage(img).mask_binary(crop_mask)
        write_img(viz.data, i)
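
# A variant sketch of the loop above that walks every input image with glob
# instead of a hard-coded index range. The rectangle is a hypothetical preset
# standing in for crop_img(img, use_preset=True); paths mirror the ones above.
import glob
import os

import cv2
import numpy as np

for path in sorted(glob.glob("debug_imgs/new_setup_crop/img[0-9]*.png")):
    img = cv2.imread(path)
    if img is None:
        continue
    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    mask[100:400, 150:500] = 255  # hypothetical preset workspace rectangle
    viz = cv2.bitwise_and(img, img, mask=mask)
    d, name = os.path.split(path)
    cv2.imwrite(os.path.join(d, name.replace("img", "img_o", 1)), viz)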
    def lego_demo(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if not DEBUG:
            self.gm.position_head()

        while True:
            time.sleep(1)  #making sure the robot is finished moving
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            print "\n new iteration"

            if c_img is not None and d_img is not None:
                #label image
                data = self.wl.label_image(c_img)
                c_img = self.cam.read_color_data()

                grasp_boxes = []
                suction_boxes = []
                singulate_boxes = []

                for i in range(data['num_labels']):
                    bbox = data['objects'][i]['box']
                    classnum = data['objects'][i]['class']
                    classname = ['grasp', 'singulate', 'suction',
                                 'quit'][classnum]
                    if classname == "grasp":
                        grasp_boxes.append(bbox)
                    elif classname == "suction":
                        suction_boxes.append(bbox)
                    elif classname == "singulate":
                        singulate_boxes.append(bbox)
                    elif classname == "quit":
                        self.ds.save_rollout()
                        self.gm.move_to_home()
                self.ds.append_data_to_rollout(c_img, data)
                main_mask = crop_img(c_img)
                col_img = ColorImage(c_img)
                workspace_img = col_img.mask_binary(main_mask)

                grasps = []
                viz_info = []
                for i in range(len(grasp_boxes)):
                    bbox = grasp_boxes[i]
                    center_mass, direction = bbox_to_grasp(bbox, c_img, d_img)

                    viz_info.append([center_mass, direction])
                    pose, rot = self.gm.compute_grasp(center_mass, direction,
                                                      d_img)
                    grasps.append(
                        self.gripper.get_grasp_pose(pose[0],
                                                    pose[1],
                                                    pose[2],
                                                    rot,
                                                    c_img=workspace_img.data))

                suctions = []
                for i in range(len(suction_boxes)):
                    suctions.append("compute_suction?")

                if len(grasps) > 0 or len(suctions) > 0:
                    cv2.imwrite(
                        "grasps.png",
                        visualize(workspace_img, [v[0] for v in viz_info],
                                  [v[1] for v in viz_info]))
                    print "running grasps"

                    for grasp in grasps:
                        print "grasping", grasp
                        self.gm.execute_grasp(grasp)
                    print "running suctions"
                    for suction in suctions:
                        print "suctioning", suction
                        #execute suction
                elif len(singulate_boxes) > 0:
                    print("singulating")
                    bbox = singulate_boxes[0]
                    obj_mask = bbox_to_mask(bbox, c_img)
                    start, end = find_singulation(col_img, main_mask, obj_mask)
                    display_singulation(start, end, workspace_img)

                    self.gm.singulate(start, end, c_img, d_img)
                else:
                    print("cleared workspace")

                self.whole_body.move_to_go()
                self.gm.position_head()
Example #12
    singulate_boxes = []

    for i in range(data['num_labels']):
        bbox = data['objects'][i]['box']
        classnum = data['objects'][i]['class']
        classname = ['grasp', 'singulate', 'suction', 'quit'][classnum]
        if classname == "grasp":
            grasp_boxes.append(bbox)
        elif classname == "suction":
            suction_boxes.append(bbox)
        elif classname == "singulate":
            singulate_boxes.append(bbox)
        elif classname == "quit":
            break

    main_mask = crop_img(c_img)
    col_img = ColorImage(c_img)
    workspace_img = col_img.mask_binary(main_mask)

    grasps = []
    viz_info = []
    for i in range(len(grasp_boxes)):
        bbox = grasp_boxes[i]
        center_mass, direction = bbox_to_grasp(bbox, c_img, d_img)

        viz_info.append([center_mass, direction])

    suctions = []
    for i in range(len(suction_boxes)):
        suctions.append("compute_suction?")
Example #13
    def siemens_demo(self):
        self.gm.position_head()

        time.sleep(3)  #making sure the robot is finished moving

        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        while not (c_img is None or d_img is None):
            path = "/home/autolab/Workspaces/michael_working/siemens_challenge/debug_imgs/web.png"
            cv2.imwrite(path, c_img)
            time.sleep(2)  #make sure new image is written before being read

            # print "\n new iteration"
            main_mask = crop_img(c_img, simple=True)
            col_img = ColorImage(c_img)
            workspace_img = col_img.mask_binary(main_mask)

            bboxes, vis_util_image = self.get_bboxes(path, col_img)
            if len(bboxes) == 0:
                print("Cleared the workspace")
                print("Add more objects, then resume")
                IPython.embed()
            else:
                box_viz = draw_boxes(bboxes, c_img)
                cv2.imwrite("debug_imgs/box.png", box_viz)
                single_objs = find_isolated_objects_by_overlap(bboxes)
                grasp_success = 1.0
                if len(single_objs) == 0:
                    single_objs = find_isolated_objects_by_distance(
                        bboxes, col_img)

                if len(single_objs) > 0:
                    to_grasp = select_first_obj(single_objs)
                    singulation_time = 0.0
                    self.run_grasp(to_grasp, c_img, col_img, workspace_img,
                                   d_img)
                    grasp_success = self.dl.record_success(
                        "grasp", other_data=[c_img, vis_util_image, d_img])
                else:
                    #for accurate singulation should have bboxes for all
                    fg_imgs = [box.to_mask(c_img, col_img) for box in bboxes]
                    groups = [get_cluster_info(fg[0])[0] for fg in fg_imgs]
                    groups = merge_groups(groups, cfg.DIST_TOL)
                    self.run_singulate(col_img, main_mask, groups, d_img)
                    sing_start = time.time()
                    singulation_success = self.dl.record_success(
                        "singulation",
                        other_data=[c_img, vis_util_image, d_img])
                    singulation_time = time.time() - sing_start

                if cfg.EVALUATE:
                    reward = self.helper.get_reward(grasp_success,
                                                    singulation_time)
                    self.dl.record_reward(reward)

            #reset to start
            self.whole_body.move_to_go()
            self.gm.move_to_home()
            self.gm.position_head()
            time.sleep(3)

            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
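
# When cfg.EVALUATE is set, siemens_demo folds grasp success and singulation
# time into a single reward via helper.get_reward. A hypothetical sketch of a
# reward with that shape (the repo's actual formula is not shown here):
def get_reward(grasp_success, singulation_time, time_penalty=0.1):
    """Reward a successful grasp; penalize time spent singulating."""
    return float(grasp_success) - time_penalty * float(singulation_time)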