Example #1
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

        self.cam = RGBD()

        self.wl = Python_Labeler(cam=self.cam)

        self.gp = GraspPlanner()
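
# Usage note (a sketch, not from the source): with this setup the camera
# streams are read the same way as in the later examples, e.g.
#
#   c_img = self.cam.read_color_data()
#   d_img = self.cam.read_depth_data()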
Example #2
class GraspTester():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

        self.cam = RGBD()

        self.wl = Python_Labeler(cam=self.cam)

        self.gp = GraspPlanner()

    def test_grasper(self):
        c_img = self.cam.read_color_data()
        data = self.wl.label_image(c_img)

        # Re-read so the grasp is computed on the latest frames.
        c_img = self.cam.read_color_data()
        d_img = self.cam.read_depth_data()

        point = self.get_points(data)

        self.gp.compute_grasp(point, d_img, c_img)

    def get_points(self, data):
        # Bounding box of the first labeled object.
        result = data['objects'][0]

        x_min = float(result['box'][0])
        y_min = float(result['box'][1])
        x_max = float(result['box'][2])
        y_max = float(result['box'][3])

        p_0 = (x_min, y_min)
        p_1 = (x_max, y_max)

        return (p_0, p_1)
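
# Usage sketch (hypothetical; requires a live HSR and the labeler):
# `label_image` is assumed to return a dict shaped like
# {'objects': [{'box': [x_min, y_min, x_max, y_max], ...}, ...]},
# which `get_points` unpacks into the two corner points:
#
#   tester = GraspTester()
#   tester.test_grasper()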
Example #3
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"
Example #4
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.ins = InitialSampler(self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.g_detector = Analytic_Grasp()

        self.sn = Success_Net(self.whole_body, self.tt, self.cam,
                              self.omni_base)

        c_img = self.cam.read_color_data()

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #5
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.suction = Suction(self.gp, self.cam)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #6
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        print "after thread"
Example #7
    def __init__(self, args):
        """For data collection of bed-making, NOT the deployment.

        Assumes we roll out the robot's policy via code (not via human touch).
        This is the 'slower' way where we have the python interface that the
        human clicks on to indicate grasping points. Good news is, our deployment
        code is probably going to be similar to this.

        For joystick: you only need it plugged in for the initial state sampler,
        which (at the moment) we are not even using.
        """
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want).
        self.view_mode = cfg.VIEW_MODE

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0

        # Bells and whistles; note the 'success check' used to decide whether
        # we are transitioning.
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        time.sleep(4)
        print(
            "Finished creating BedMaker()! Get the bed set up and run bed-making!"
        )
        if cfg.INS_SAMPLE:
            print("TODO: we don't have sampling code here.")

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()
Example #8
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        self.RCNN = Depth_Object("bottle")
        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #9
    def __init__(self, options, name=None):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        
        if name is None:
            name = '07_14_15_27_17save.ckpt-12000'
        self.cam = RGBD()

        # Poll until the camera intrinsics arrive.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                rospy.logerr('info not received')
       

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.options = options
        self.detect = Detector(name)
        self.br = tf.TransformBroadcaster()
        self.gp = GraspPlanner()
Example #10
    def __init__(self, args):
        self.args = args
        self.robot = robot = hsrb_interface.Robot()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.data = dict() # list of processed depth images and actions taken (I_t, a_t)

        # We don't use this directly, but it makes a frame that we need to go
        # from pixels to world grasp poses.
        self.rgbd_map = RGBD2Map()

        # TODO: eventually we need to remove this. Doing this to let us go from
        # camera coordinates to world frame, but we need HSR_CORE to support it.
        # But, should be easy because the grasp planner is pretty simple and we
        # only use it to compute the average depth values in a region.
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, options=None, gripper=robot.get('gripper'))
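
        # A minimal sketch of the averaging mentioned above (assumed to mirror
        # GraspPlanner.find_mean_depth, which treats zero pixels as missing):
        #
        #   region = d_img[y0:y1, x0:x1]
        #   mean_depth = np.mean(region[np.nonzero(region)])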

        # Also this is a bit hacky. We want the HSR to rotate so that it's
        # _facing_ the bed now, whereas it started facing 'sideways'. Makes a
        # target pose for the robot so it goes there, before grasp execution.
        # TODO: make pose here. I think we can get away with rotating wrt the
        # map but in general we want to create our own poses.

        print("Initialized the data collector! Resting for 2 seconds...")
        time.sleep(2)

        # Handle the part about loading the network and pretrained model.
        HEAD  = '/nfs/diskstation/seita/bedmake_ssl'
        MODEL = 'resnet18_2018-11-18-09-50_000'
        PATH  = join(HEAD, MODEL, 'act_predictor.pt')

        # Get old args we used, and put into a newer Namespace object.
        with open(join(HEAD, MODEL, 'args.json'), 'r') as fh:
            saved_args = json.load(fh)
        self.netargs = opt._json_to_args(jsonfile=saved_args)
        
        # Load the pretrained model.
        model = opt.get_pretrained_model(self.netargs)
        self.act_predictor = ActPredictorNet(model, self.netargs)
        self.act_predictor.load_state_dict(torch.load(PATH))
        self.act_predictor.eval()

        self.transforms_valid = transforms.Compose([
            CT.Rescale((256,256)),
            CT.CenterCrop((224,224)),
            CT.ToTensor(),
            CT.Normalize(opt.MEAN, opt.STD),
        ])
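
# Hypothetical inference sketch (not shown in the source): with the network
# and transforms above, a new sample would be scored along the lines of
#
#   x = self.transforms_valid(sample)   # exact sample format depends on CT
#   with torch.no_grad():
#       out = self.act_predictor(x.unsqueeze(0))
#
# assuming ActPredictorNet follows the usual torch.nn.Module call convention.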
Example #11
    def __init__(self):
        """
        Class to run HSR lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"
Example #12
    def __init__(self):
        """
        Class to run HSR lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"
Example #13
    def __init__(self, args):
        """For deploying the bed-making policy, not for data collection.

        We use all three variants (analytic, human, networks) here due to
        similarities in code structure.
        """
        self.args = args
        DEBUG = True

        # Set up the robot.
        self.robot = robot = hsrb_interface.Robot()
        if DEBUG:
            print("finished: hsrb_interface.Robot()...")
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        if DEBUG:
            print("finished: robot.get(omni_base)...")
        self.whole_body = self.robot.get('whole_body')
        if DEBUG:
            print("finished: robot.get(whole_body)...")
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # Set up initial state, table, etc. Don't forget view mode!
        self.view_mode = BED_CFG.VIEW_MODE
        self.com.go_to_initial_state(self.whole_body)
        if DEBUG:
            print("finished: go_to_initial_state() ...")
        self.tt = TableTop()
        if DEBUG:
            print("finished: TableTop()...")

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0
        self.b_grasp_count = 0
        self.t_grasp_count = 0

        # AH, build the YOLO network beforehand.
        g_cfg = BED_CFG.GRASP_CONFIG
        s_cfg = BED_CFG.SUCC_CONFIG
        self.yc = YOLO_CONV(options=s_cfg)
        self.yc.load_network()

        # Policy for grasp detection, using Deep Imitation Learning.
        # Or, actually, sometimes we will use humans or an analytic version.
        if DEBUG:
            self._test_variables()
        print("\nnow forming the GDetector with type {}".format(args.g_type))
        if args.g_type == 'network':
            self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc)
        elif args.g_type == 'analytic':
            self.g_detector = Analytic_Grasp()  # TODO not implemented!
        elif args.g_type == 'human':
            print("Using a human, don't need to have a `g_detector`. :-)")

        if DEBUG:
            self._test_variables()
            print("\nnow making success net")
        self.sn = Success_Net(self.whole_body,
                              self.tt,
                              self.cam,
                              self.omni_base,
                              fg_cfg=s_cfg,
                              bed_cfg=BED_CFG,
                              yc=self.yc)

        # Bells and whistles.
        self.br = TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.dp = DrawPrediction()

        # When we start, do rospy.spin() to check the frames (phase 1). Then re-run.
        # The current hack we have to get around crummy AR marker detection. :-(
        if DEBUG:
            self._test_variables()
        print("Finished with init method")
        time.sleep(4)
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

        # For evaluating coverage.
        self.img_start = None
        self.img_final = None
        self.img_start2 = None
        self.img_final2 = None

        # For grasp offsets.
        self.apply_offset = False
Example #14
class BedMaker():
    def __init__(self):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        print "after thread"

    def find_mean_depth(self, d_img):
        '''
        Returns the mean over the nonzero (valid) depth readings in d_img.
        '''
        indx = np.nonzero(d_img)
        mean = np.mean(d_img[indx])
        return mean

    def lego_demo(self):

        self.rollout_data = []
        self.get_new_grasp = True

        if not DEBUG:
            self.position_head()
        b = time.time()
        while True:

            time.sleep(1)  # make sure the robot has finished moving

            a = time.time()
            c_img = self.cam.read_color_data()
            d_img = self.cam.read_depth_data()
            cv2.imwrite("debug_imgs/c_img.png", c_img)
            print "time to get images", time.time() - a
            print "\n new iteration"
            if (not c_img == None and not d_img == None):

                c_ms, dirs, _ = run_connected_components(c_img)
                img = draw(c_img, c_ms, dirs)

                # # IPython.embed()
                for c_m, direction in zip(c_ms, dirs):
                    pose, rot = self.compute_grasp(c_m, direction, d_img)
                    rot -= pi / 2.0
                    print "pose, rot:", pose, rot

                #### Determine what object to grasp

                grasp_name = self.gripper.get_grasp_pose(pose[0],
                                                         pose[1],
                                                         pose[2],
                                                         rot,
                                                         c_img=c_img)

                self.execute_grasp(grasp_name)

                self.whole_body.move_to_go()
                self.position_head()

    def execute_grasp(self, grasp_name):
        # Open, approach the grasp frame, close, retreat, move over the
        # drop-off frame, and release.
        self.gripper.open_gripper()

        self.whole_body.end_effector_frame = 'hand_palm_link'

        self.whole_body.move_end_effector_pose(geometry.pose(), grasp_name)

        self.gripper.close_gripper()
        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.1),
                                               grasp_name)

        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.1),
                                               'head_down')

        self.gripper.open_gripper()

    def compute_grasp(self, c_m, direction, d_img):

        if direction:
            rot = 0.0
        else:
            rot = 1.57

        x = c_m[1]
        y = c_m[0]

        # Mean depth over a 40x40 window around the grasp point.
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]

        z = self.gp.find_mean_depth(z_box)

        return [x, y, z], rot

    def singulate(self, start, end, c_img, d_img):
        # [355.9527559055119, 123.53543307086613, 977.26812500000005] 0.0
        # Orient the gripper along the start -> end push direction; start and
        # end are (y, x) pixel coordinates.
        rot = np.pi / 2 + np.arctan2(end[0] - start[0], end[1] - start[1])

        self.gripper.close_gripper()
        # self.go_to_point(start, rot, c_img, d_img)
        # self.go_to_point(end, rot, c_img, d_img)

        y, x = start
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        # above_start_pose_name = self.gripper.get_grasp_pose(x,y,z,rot,c_img=c_img)
        start_pose_name = self.gripper.get_grasp_pose(x,
                                                      y,
                                                      z,
                                                      rot,
                                                      c_img=c_img)

        y, x = end
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        end_pose_name = self.gripper.get_grasp_pose(x, y, z, rot, c_img=c_img)

        # raw_input("Click enter to move to " + above_start_pose_name)
        # self.whole_body.move_end_effector_pose(geometry.pose(), start_pose_name)
        # raw_input("Click enter to singulate from " + start_pose_name)
        print "singulating", start_pose_name
        self.whole_body.move_end_effector_pose(geometry.pose(z=-0.05),
                                               start_pose_name)
        self.whole_body.move_end_effector_pose(geometry.pose(z=-.01),
                                               start_pose_name)
        # raw_input("Click enter to singulate to " + end_pose_name)
        print "singulating", end_pose_name
        self.whole_body.move_end_effector_pose(geometry.pose(z=-.01),
                                               end_pose_name)

        self.gripper.open_gripper()

    def go_to_point(self, point, rot, c_img, d_img):
        y, x = point
        z_box = d_img[y - 20:y + 20, x - 20:x + 20]
        z = self.gp.find_mean_depth(z_box)
        print "singulation pose:", x, y, z
        pose_name = self.gripper.get_grasp_pose(x, y, z, rot, c_img=c_img)
        raw_input("Click enter to move to " + pose_name)
        self.whole_body.move_end_effector_pose(geometry.pose(), pose_name)

    def position_head(self):

        self.tt.move_to_pose(self.omni_base, 'lower_start')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})
Example #15
class YoloDetect():

    def __init__(self, options, name=None):
        '''
        Initializer for a Policy.

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class
        debug : bool
            Whether to display a training set point for debugging.
        '''

        
        if name is None:
            name = '07_14_15_27_17save.ckpt-12000'
        self.cam = RGBD()

        # Poll until the camera intrinsics arrive.
        cam_info = None
        while cam_info is None:
            try:
                cam_info = self.cam.read_info_data()
            except Exception:
                rospy.logerr('info not received')
       

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.options = options
        self.detect = Detector(name)
        self.br = tf.TransformBroadcaster()
        self.gp = GraspPlanner()


    def check_depth(self, p_list, d_img):
        # Visual debug: depth in the red channel, detected points in green.
        h, w = d_img.shape

        color_img = np.zeros([h, w, 3], dtype=np.uint8)

        color_img[:, :, 0] = d_img * (255.0 / float(np.max(d_img)))

        for p in p_list:
            print(p)
            color_img[p[1] - 5:p[1] + 5, p[0] - 5:p[0] + 5, 1] = 255

        cv2.imshow('debug', color_img)
        cv2.waitKey(30)

    def get_detect(self, c_img, d_img):
        '''
        Runs the detector on the color image and records a labeled 3D pose
        for each detection, using the mean depth inside the detection box.
        '''

        results = self.detect.numpy_detector(c_img)

        self.poses = []
        #IPython.embed()

        p_list = []
        for result in results:
            print(result)

            # (x, y) is used as the box center; w, h as half-extents.
            x = int(result['box'][0])
            y = int(result['box'][1])
            w = int(result['box'][2] / 2)
            h = int(result['box'][3] / 2)

            p_list.append([x, y])

            # Crop the depth image to the detection box.
            d_img_c = d_img[y - h:y + h, x - w:x + w]
            depth = self.gp.find_mean_depth(d_img_c)
            self.poses.append([result['class'], self.convert_crop([x, y, depth])])

        self.check_depth(p_list,d_img)

    def convert_crop(self,pose):

        pose[0] = self.options.OFFSET_Y + pose[0]
        pose[1] = self.options.OFFSET_X + pose[1]

        return pose
            


    def broadcast_poses(self):
        #while True: 
        poses = self.poses
        count = 0
        for pose in poses:
            
            num_pose = pose[1]
            label = pose[0]

            td_points = self.pcm.projectPixelTo3dRay((num_pose[0],num_pose[1]))
            pose = np.array([td_points[0],td_points[1],0.001*num_pose[2]])
            

            self.br.sendTransform((td_points[0], td_points[1], pose[2]),
                    (0.0, 0.0, 0.0, 1.0),
                    rospy.Time.now(),
                    label,
                    'head_rgbd_sensor_rgb_frame')
            count += 1
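
# Note on the back-projection above: projectPixelTo3dRay returns a unit ray
# through the pixel, so a metric 3D point is usually recovered by scaling the
# whole ray so its z-component equals the measured depth, e.g.
#
#   scale = z / ray[2]
#   point = (ray[0] * scale, ray[1] * scale, z)
#
# The broadcast here rescales only z; whether that is intended depends on how
# the downstream frames consume it.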
Example #16
    side = 'BOTTOM'

    cam = RGBD()
    com = COM()

    com.go_to_initial_state(whole_body)

    tt = TableTop()
    tt.find_table(robot)

    grasp_count = 0

    br = tf.TransformBroadcaster()
    tl = TransformListener()

    gp = GraspPlanner()
    gripper = Crane_Gripper(gp, cam, com.Options, robot.get('gripper'))
    suction = Suction_Gripper(gp, cam, com.Options, robot.get('suction'))
    gm = GraspManipulator(gp, gripper, suction, whole_body, omni_base, tt)
    gm.position_head()

    print "after thread"
    curr_offsets = np.array([0, 0, -0.5])
    curr_rot = np.array([0.0,0.0,1.57])

    while True:
        label = id_generator()
        tt.make_new_pose(curr_offsets, label, rot=curr_rot)
        whole_body.move_end_effector_pose(geometry.pose(z=-0.1), label)
        delta = raw_input()
        while not (delta in ["+x", "-x", "+y", "-y", "+z", "-z"]):