Example #1
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''
        self.cam = RGBD()
        self.com = COM()

        not_read = True
        while not_read:
            try:
                cam_info = self.cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except Exception:
                rospy.logerr('info not received')

        #self.side = 'BOTTOM'
        self.cam_info = cam_info
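
A minimal alternative to the hand-rolled polling loop above, using
rospy.wait_for_message (the HSR camera-info topic name here is an assumption):

import rospy
from sensor_msgs.msg import CameraInfo

def wait_for_cam_info(topic='/hsrb/head_rgbd_sensor/rgb/camera_info',
                      timeout=10.0):
    # Block until a single CameraInfo message arrives; raises
    # rospy.ROSException on timeout instead of spinning forever.
    return rospy.wait_for_message(topic, CameraInfo, timeout=timeout)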
Example #2
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.whole_body.move_to_joint_positions({'head_tilt_joint': -0.8})

        self.cam = RGBD()

        self.wl = Python_Labeler(cam=self.cam)

        self.gp = GraspPlanner()
Example #3
    def __init__(self, user_name=None, inject_noise=False, noise_scale=1.0):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()
        self.cam = RGBD()
        time.sleep(5)
        self.b_d = Bottle_Detect(self.cam.read_info_data())

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        if user_name is not None:
            self.com.Options.setup(self.com.Options.root_dir, user_name)

        #self.com.go_to_initial_state(self.whole_body,self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.joystick = JoyStick_X(self.com,
                                   inject_noise=inject_noise,
                                   noise_scale=noise_scale)
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()
Example #4
    def __init__(self, com, features):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''


        self.pubTwist = rospy.Publisher('/hsrb/command_velocity',
                          Twist, queue_size=1)

        self.com = com

        self.cam = RGBD()

        self.trajectory = []
        self.features = features
Example #5
    def __init__(self):

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.web = Web_Labeler()
        print "after thread"
Example #6
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        #PARAMETERS TO CHANGE
        self.side = 'TOP'

        self.r_count = 0

        self.grasp_count = 0
        self.success_count = 0

        self.true_count = 0
        self.grasp = True

        self.r_count = self.get_rollout_number()

        self.cam = RGBD()
        self.com = COM()

        self.joystick = JoyStick_X(self.com)

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(cam=self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.position_head()

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        #self.test_current_point()
        time.sleep(4)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #7
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        #self.com.go_to_initial_state(self.whole_body)

        self.count = 425

        self.joystick = JoyStick_X(self.com)
        self.true_count = 0
Example #8
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()

        self.suction = Suction(self.gp, self.cam)

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #9
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if cfg.USE_WEB_INTERFACE:
            self.wl = Web_Labeler()
        else:
            self.wl = Python_Labeler(self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)
        self.ins = InitialSampler(self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   self.robot.get('gripper'))

        self.g_detector = Analytic_Grasp()

        self.sn = Success_Net(self.whole_body, self.tt, self.cam,
                              self.omni_base)

        c_img = self.cam.read_color_data()

        #self.test_current_point()
        time.sleep(4)
        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #10
    def __init__(self, args):
        """For data collection of bed-making, NOT the deployment.

        Assumes we roll out the robot's policy via code (not via human touch).
        This is the 'slower' way where we have the python interface that the
        human clicks on to indicate grasping points. Good news is, our deployment
        code is probably going to be similar to this.

        For joystick: you only need it plugged in for the initial state sampler,
        which (at the moment) we are not even using.
        """
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = robot.get('omni_base')
        self.whole_body = robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # View mode: STANDARD (the way I was doing earlier), CLOSE (the way they want).
        self.view_mode = cfg.VIEW_MODE

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0

        # Bells and whistles; note the 'success check' to check if transitioning
        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.sc = Success_Check(self.whole_body, self.tt, self.cam,
                                self.omni_base)

        time.sleep(4)
        print(
            "Finished creating BedMaker()! Get the bed set up and run bed-making!"
        )
        if cfg.INS_SAMPLE:
            print("TODO: we don't have sampling code here.")

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()
Example #11
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        # if cfg.USE_WEB_INTERFACE:
        #     self.wl = Web_Labeler()
        # else:
        #     self.wl = Python_Labeler(cam = self.cam)

        self.com.go_to_initial_state(self.whole_body)

        self.tt = TableTop()
        self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()

        self.gripper = Lego_Gripper(self.gp, self.cam, self.com.Options,
                                    self.robot.get('gripper'))

        self.RCNN = Depth_Object("bottle")
        #self.test_current_point()

        #thread.start_new_thread(self.ql.run,())
        print "after thread"
Example #12
    def __init__(self):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        if not DEBUG:
            self.com.go_to_initial_state(self.whole_body)

            self.tt = TableTop()
            self.tt.find_table(self.robot)

        self.wl = Python_Labeler(cam=self.cam)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.ds = data_saver('tpc_rollouts/rollouts')

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.whole_body,
                                   self.omni_base, self.tt)

        print "after thread"
Example #13
    def __init__(self):
        self.cam = RGBD()

        not_read = True
        while not_read:
            try:
                cam_info = self.cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except Exception:
                rospy.logerr('info not received')

        self.pcm = PCM()

        self.pcm.fromCameraInfo(cam_info)
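
With the model initialized from CameraInfo, it can convert between 3-D points
and pixels. A usage sketch, assuming PCM wraps image_geometry's
PinholeCameraModel:

from image_geometry import PinholeCameraModel

pcm = PinholeCameraModel()
pcm.fromCameraInfo(cam_info)
# Project a point in the camera's optical frame (meters) to pixel coordinates.
u, v = pcm.project3dToPixel((0.1, 0.0, 1.0))
# And back: the unit ray through pixel (u, v), in the optical frame.
ray = pcm.projectPixelTo3dRay((u, v))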
Example #14
    def __init__(self):
        """
        Class to run the HSR Lego task

        """

        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()
        # if not DEBUG:
        self.com.go_to_initial_state(self.whole_body)

        #     self.tt = TableTop()
        #     self.tt.find_table(self.robot)

        self.grasp_count = 0

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.collision_world = hsrb_interface.collision_world.CollisionWorld(
            "global_collision_world")
        self.collision_world.remove_all()
        self.collision_world.add_box(x=.8,
                                     y=.9,
                                     z=0.5,
                                     pose=geometry.pose(y=1.4, z=0.15),
                                     frame_id='map')

        print "after thread"
Example #15
    def __init__(self):
        """
        Class to run the HSR Lego task

        """
        self.robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')

        self.side = 'BOTTOM'

        self.cam = RGBD()
        self.com = COM()

        self.com.go_to_initial_state(self.whole_body)

        self.grasp_count = 0
        self.helper = Helper(cfg)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()

        self.gp = GraspPlanner()
        self.gripper = Crane_Gripper(self.gp, self.cam, self.com.Options,
                                     self.robot.get('gripper'))
        self.suction = Suction_Gripper(self.gp, self.cam, self.com.Options,
                                       self.robot.get('suction'))

        self.gm = GraspManipulator(self.gp, self.gripper, self.suction,
                                   self.whole_body, self.omni_base, self.tl)

        self.dl = DataLogger("stats_data/model_base", cfg.EVALUATE)

        self.web = Web_Labeler(cfg.NUM_ROBOTS_ON_NETWORK)

        model_path = 'main/output_inference_graph.pb'
        label_map_path = 'main/object-detection.pbtxt'
        self.det = Detector(model_path, label_map_path)

        print "after thread"
Example #16
    def __init__(self, options, name=None):
        '''
        Initializer for a Policy

        Parameters
        ----------
        yumi : An instantiated YuMi robot
        com : The common class for the robot
        cam : An open bincam class

        debug : bool

            A bool to indicate whether or not to display a training set point for
            debugging.

        '''

        
        if name is None:
            name = '07_14_15_27_17save.ckpt-12000'
        self.cam = RGBD()
        not_read = True
        while not_read:
            try:
                cam_info = self.cam.read_info_data()
                if cam_info is not None:
                    not_read = False
            except Exception:
                rospy.logerr('info not received')

        self.pcm = PCM()
        self.pcm.fromCameraInfo(cam_info)
        self.options = options
        self.detect = Detector(name)
        self.br = tf.TransformBroadcaster()
        self.gp = GraspPlanner()
Example #17
    def __init__(self):
        self.robot = hsrb_interface.Robot()

        self.noise = 0.1

        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.gripper = self.robot.get('gripper')
        self.tl = TransformListener()

        self.start_recording = False
        self.stop_recording = False

        self.com = COM()

        self.com.go_to_initial_state(self.whole_body, self.gripper)

        #self.whole_body.move_to_joint_positions({'head_tilt_joint':-0.3})

        self.cam = RGBD()

        self.joystick = JoyStick()
        self.torque = Gripper_Torque()
        self.joints = Joint_Positions()
Example #18
    T = 40

    setup_dir = "bed_making/"

    root_dir = "/media/autolab/1tb/"

    def __init__(self):

        self.setup(self.root_dir, self.setup_dir)


if __name__ == '__main__':

    #hsrb_interface.Robot()
    rospy.init_node('readJoy_node', anonymous=True)
    cam = RGBD()

    c_o = Corl_Options()
    count = 0

    while True:
        rgb_img = cam.read_color_data()
        count += 1

        if rgb_img is not None:

            img_cropped = rgb_img[c_o.OFFSET_X:c_o.OFFSET_X + c_o.WIDTH,
                                  c_o.OFFSET_Y:c_o.OFFSET_Y + c_o.HEIGHT, :]

            cv2.imshow('debug', img_cropped)
            cv2.waitKey(30)  # imshow needs a waitKey call to refresh the window
Example #19
    def __init__(self, args):
        """For deploying the bed-making policy, not for data collection.

        We use all three variants (analytic, human, networks) here due to
        similarities in code structure.
        """
        self.args = args
        DEBUG = True

        # Set up the robot.
        self.robot = robot = hsrb_interface.Robot()
        if DEBUG:
            print("finished: hsrb_interface.Robot()...")
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        if DEBUG:
            print("finished: robot.get(omni_base)...")
        self.whole_body = self.robot.get('whole_body')
        if DEBUG:
            print("finished: robot.get(whole_body)...")
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # Set up initial state, table, etc. Don't forget view mode!
        self.view_mode = BED_CFG.VIEW_MODE
        self.com.go_to_initial_state(self.whole_body)
        if DEBUG:
            print("finished: go_to_initial_state() ...")
        self.tt = TableTop()
        if DEBUG:
            print("finished: TableTop()...")

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        #self.ins = InitialSampler(self.cam)
        self.side = 'BOTTOM'
        self.grasp_count = 0
        self.b_grasp_count = 0
        self.t_grasp_count = 0

        # AH, build the YOLO network beforehand.
        g_cfg = BED_CFG.GRASP_CONFIG
        s_cfg = BED_CFG.SUCC_CONFIG
        self.yc = YOLO_CONV(options=s_cfg)
        self.yc.load_network()

        # Policy for grasp detection, using Deep Imitation Learning.
        # Or, actually, sometimes we will use humans or an analytic version.
        if DEBUG:
            self._test_variables()
        print("\nnow forming the GDetector with type {}".format(args.g_type))
        if args.g_type == 'network':
            self.g_detector = GDetector(g_cfg, BED_CFG, yc=self.yc)
        elif args.g_type == 'analytic':
            self.g_detector = Analytic_Grasp()  # TODO not implemented!
        elif args.g_type == 'human':
            print("Using a human, don't need to have a `g_detector`. :-)")

        if DEBUG:
            self._test_variables()
            print("\nnow making success net")
        self.sn = Success_Net(self.whole_body,
                              self.tt,
                              self.cam,
                              self.omni_base,
                              fg_cfg=s_cfg,
                              bed_cfg=BED_CFG,
                              yc=self.yc)

        # Bells and whistles.
        self.br = TransformBroadcaster()
        self.tl = TransformListener()
        self.gp = GraspPlanner()
        self.gripper = Bed_Gripper(self.gp, self.cam, self.com.Options,
                                   robot.get('gripper'))
        self.dp = DrawPrediction()

        # When we start, do rospy.spin() to check the frames (phase 1). Then re-run.
        # The current hack we have to get around crummy AR marker detection. :-(
        if DEBUG:
            self._test_variables()
        print("Finished with init method")
        time.sleep(4)
        if args.phase == 1:
            print("Now doing rospy.spin() because phase = 1.")
            rospy.spin()

        # For evaluating coverage.
        self.img_start = None
        self.img_final = None
        self.img_start2 = None
        self.img_final2 = None

        # For grasp offsets.
        self.apply_offset = False
Example #20
    def __init__(self):
        """
        For faster data collection where we manually simulate it.
        We move with our hands.  This will give us the large datasets we need.

        Supports both grasping and success net data collection. If doing the
        grasping, DON'T MAKE IT A SUCCESS CASE where the blanket is all the way
        over the corner. That way we can use the images for both grasping and
        as failure cases for the success net.
        
        For the success net data collection, collect data at roughly a 5:1 ratio
        of successes:failures, and make failures the borderline cases. Then we
        borrow data from the grasping network to make it 5:5 or 1:1 for the actual
        success net training process (use another script for forming the data).
        We use the keys on the joystick to indicate the success/failure class.
        """
        makedirs()
        self.robot = robot = hsrb_interface.Robot()
        self.rgbd_map = RGBD2Map()
        self.omni_base = self.robot.get('omni_base')
        self.whole_body = self.robot.get('whole_body')
        self.cam = RGBD()
        self.com = COM()
        self.wl = Python_Labeler(cam=self.cam)

        # ----------------------------------------------------------------------
        # PARAMETERS TO CHANGE  (well, really the 'side' and 'grasp' only).
        # We choose a fixed side and collect data from there, no switching.
        # Automatically saves based on `r_count` and counting the saved files.
        # `self.grasp` remains FIXED in the code, so we're either only
        # collecting grasp or only collecting success images.
        # ----------------------------------------------------------------------
        self.side = 'BOTTOM'  # CHANGE AS NEEDED
        self.grasp = False  # CHANGE AS NEEDED
        self.grasp_count = 0
        self.success_count = 0
        self.true_count = 0
        self.r_count = self.get_rollout_number()
        self.joystick = JoyStick_X(self.com)
        print("NOTE: grasp={} (success={}), side: {}, rollout num: {}".format(
            self.grasp, not self.grasp, self.side, self.r_count))
        print("Press X for any SUCCESS (class 0), Y for FAILURES (class 1).")

        # Set up initial state, table, etc.
        self.com.go_to_initial_state(self.whole_body)
        self.tt = TableTop()

        # For now, a workaround. Ugly but it should do the job ...
        #self.tt.find_table(robot)
        self.tt.make_fake_ar()
        self.tt.find_table_workaround(robot)

        self.br = tf.TransformBroadcaster()
        self.tl = TransformListener()
        time.sleep(4)

        # When we start, spin this so we can check the frames. Then un-comment,
        # etc. It's the current hack we have to get around crummy AR marker detection.
        #rospy.spin()

        # THEN we position the head since that involves moving the _base_.
        self.position_head()
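
The docstring above defers class balancing for the success net to another
script. A hypothetical sketch of that rebalancing step (directory names are
assumptions; the 5:1 collection ratio comes from the docstring):

import os
import random

def rebalance(success_dir, failure_dir, grasp_dir):
    # Successes were collected at roughly 5x the rate of failures, so borrow
    # grasp-rollout images (failure cases by construction, since grasp data
    # was never collected in a success configuration) until classes are 1:1.
    succ = [os.path.join(success_dir, f) for f in os.listdir(success_dir)]
    fail = [os.path.join(failure_dir, f) for f in os.listdir(failure_dir)]
    grasp = [os.path.join(grasp_dir, f) for f in os.listdir(grasp_dir)]
    need = max(0, len(succ) - len(fail))
    fail += random.sample(grasp, min(need, len(grasp)))
    return succ, fail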
Example #21
    def clean_up(self):
        self.sess.close()
        tfl.reset_default_graph()

    def broadcast_poses(self, poses):
        count = 0
        for pose in poses:
            print "POSE ", pose
            #IPython.embed()
            self.br.sendTransform((pose[0], pose[1] - 0.05, pose[2] + 0.06),
                                  (0.0, 0.0, 0.0, 1.0), rospy.Time.now(),
                                  'bottle' + str(count),
                                  'head_rgbd_sensor_link')
            count += 1


if __name__ == '__main__':
    rospy.init_node('readJoy_node', anonymous=True)

    print 'bottle'

    rgbd = RGBD()
    time.sleep(5)

    do = Bottle_Detect()
    while True:
        img = rgbd.read_color_data()
        print "IMAGE"
        print img
        if img is not None:
            do.detect_bottle(img)
# make save dir
if not os.path.exists(SAVE_PATH):
    os.makedirs(SAVE_PATH)

# Orient the robot appropriately: make sure it starts about a foot away from
# the long side of the bed, facing parallel to the bed.
robot = hsrb_interface.Robot()
whole_body = robot.get('whole_body')
whole_body.move_to_go()
whole_body.move_to_joint_positions({'arm_flex_joint': -np.pi / 16.0})
whole_body.move_to_joint_positions({'head_pan_joint': np.pi / 2.0})
whole_body.move_to_joint_positions({'head_tilt_joint': -np.pi / 4.0})
whole_body.move_to_joint_positions({'arm_lift_joint': 0.120})

# start collecting data
rollout_num = 1  # do about 120 rollouts for a decent dataset
camera = RGBD()
labels = list()
userinput = 'a'
while userinput != 'q':
    print("rollout", rollout_num)
    # generate random numbers for top/bottom, success/failure of rollout
    top = np.random.randint(2)
    success = np.random.randint(2)
    labels.append(str(success))  # classification labels
    print("Top (1)/Bottom (0): ", top)
    print("Success (1)/Failure (0): ", success)
    userinput = raw_input(
        "press enter to take the picture, or enter q to quit: ")
    if userinput != 'q':
        c_img = camera.read_color_data()
        d_img = camera.read_depth_data()