Example #1
    def render(self, mode='human'):
        if self._gym_cam is None:
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
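
These snippets all assume the standard PyRep imports. A minimal sketch of the header they likely share (module paths follow the PyRep package layout; verify against your installed version):

from pyrep.objects.dummy import Dummy
from pyrep.objects.vision_sensor import VisionSensor
from pyrep.const import RenderMode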
Example #2
    def __init__(self,
                 task_class,
                 observation_mode='state',
                 render_mode: Union[None, str] = None):
        self._observation_mode = observation_mode
        self._render_mode = render_mode
        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode == 'vision':
            obs_config.set_all(True)
        else:
            raise ValueError('Unrecognised observation_mode: %s.' %
                             observation_mode)
        action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
        self.env = Environment(action_mode,
                               obs_config=obs_config,
                               headless=True)
        self.env.launch()
        self.task = self.env.get_task(task_class)

        _, obs = self.task.reset()

        self.action_space = spaces.Box(low=-1.0,
                                       high=1.0,
                                       shape=(self.env.action_size, ))

        if observation_mode == 'state':
            self.observation_space = spaces.Box(
                low=-np.inf, high=np.inf, shape=obs.get_low_dim_data().shape)
        elif observation_mode == 'vision':
            self.observation_space = spaces.Dict({
                "state":
                spaces.Box(low=-np.inf,
                           high=np.inf,
                           shape=obs.get_low_dim_data().shape),
                "left_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.left_shoulder_rgb.shape),
                "right_shoulder_rgb":
                spaces.Box(low=0, high=1, shape=obs.right_shoulder_rgb.shape),
                "wrist_rgb":
                spaces.Box(low=0, high=1, shape=obs.wrist_rgb.shape),
                "front_rgb":
                spaces.Box(low=0, high=1, shape=obs.front_rgb.shape),
            })

        if render_mode is not None:
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            if render_mode == 'human':
                self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            else:
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)
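
This __init__ belongs to a Gym-style wrapper around RLBench. A hedged usage sketch, assuming the wrapper class is named RLBenchEnv (a hypothetical name; substitute the actual class) and that it implements the usual Gym reset()/step() methods:

import numpy as np
from rlbench.tasks import ReachTarget

# RLBenchEnv is a stand-in name for the class whose __init__ is shown above.
env = RLBenchEnv(ReachTarget, observation_mode='state')
obs = env.reset()
for _ in range(10):
    # Sample a random joint-velocity action within the declared bounds.
    action = np.random.uniform(-1.0, 1.0, size=env.action_space.shape)
    obs, reward, done, info = env.step(action)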
Example #3
    def test_set_get_explicit_handling(self):
        cam = VisionSensor.create((640, 480))
        flag_orig = cam.get_explicit_handling()

        cam.set_explicit_handling(value=1)
        flag = cam.get_explicit_handling()
        self.assertEqual(flag, 1)

        cam.set_explicit_handling(value=0)
        flag = cam.get_explicit_handling()
        self.assertEqual(flag, 0)

        cam.set_explicit_handling(flag_orig)
        cam.remove()
Example #4
    def test_create(self):
        cam = VisionSensor.create([640, 480],
                                  perspective_mode=True,
                                  view_angle=35.0,
                                  near_clipping_plane=0.25,
                                  far_clipping_plane=5.0,
                                  render_mode=RenderMode.OPENGL3)
        self.assertEqual(cam.capture_rgb().shape, (480, 640, 3))
        self.assertEqual(cam.get_perspective_mode(),
                         PerspectiveMode.PERSPECTIVE)
        self.assertAlmostEqual(cam.get_perspective_angle(), 35.0, 3)
        self.assertEqual(cam.get_near_clipping_plane(), 0.25)
        self.assertEqual(cam.get_far_clipping_plane(), 5.0)
        self.assertEqual(cam.get_render_mode(), RenderMode.OPENGL3)
Example #5
    def test_handle_explicitly(self):
        cam = VisionSensor.create((640, 480))

        # blank image
        rgb = cam.capture_rgb()
        self.assertEqual(rgb.sum(), 0)

        # non-blank image
        cam.set_explicit_handling(value=1)
        cam.handle_explicitly()
        rgb = cam.capture_rgb()
        self.assertNotEqual(rgb.sum(), 0)

        cam.remove()
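
The test hinges on explicit handling: a freshly created sensor returns a blank image until it has actually rendered, and set_explicit_handling(value=1) lets you force a render on demand with handle_explicitly() instead of waiting on the simulation loop. A standalone sketch, assuming a running PyRep instance (my_scene.ttt is a placeholder scene file):

from pyrep import PyRep
from pyrep.objects.vision_sensor import VisionSensor

pr = PyRep()
pr.launch('my_scene.ttt', headless=True)  # placeholder scene path
pr.start()

cam = VisionSensor.create((640, 480))
cam.set_explicit_handling(value=1)
pr.step()                # the sensor does not render on plain simulation steps
cam.handle_explicitly()  # render exactly once, on demand
rgb = cam.capture_rgb()  # float array in [0, 1], shape (480, 640, 3)

pr.stop()
pr.shutdown()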
Example #6
    def render(self, mode='human'):
        """Gym render function. To watch the simulator during a run, call
        render(mode='human') once. To collect RGB frames, call
        render(mode='rgb_array') every time you want a frame."""
        if self._gym_cam is None:
            # Add the camera to the scene
            cam_placeholder = Dummy.create()
            cam_placeholder.set_pose([0, -0.5, 5, 1, 0, 0, 0])
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam2 = VisionSensor('Vision_sensor')
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            self._gym_cam2.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            if mode == "rgb_array":
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)
        if mode == "rgb_array":
            return self._gym_cam.capture_rgb()
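
Note the two construction styles used here: VisionSensor('Vision_sensor') wraps a sensor that already exists in the scene by name, while VisionSensor.create(...) adds a new one programmatically. A short sketch of the distinction (the name 'Vision_sensor' assumes such an object exists in the loaded scene):

# Wrap an existing scene object by name; raises if no such object exists.
existing_cam = VisionSensor('Vision_sensor')

# Create a brand-new sensor at the given resolution and add it to the scene.
new_cam = VisionSensor.create([640, 360])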
Example #7
def main(argv):

    obs_config = ObservationConfig(record_gripper_closing=True)
    obs_config.set_all(False)

    vrc = rand_every = None
    frequency = 0
    if FLAGS.domain_randomization:
        vrc = VisualRandomizationConfig(FLAGS.textures_path)
        rand_every = RandomizeEvery.TRANSITION
        frequency = 10

    env = Environment(ActionMode(), obs_config=obs_config,
                      randomize_every=rand_every, frequency=frequency,
                      visual_randomization_config=vrc, headless=FLAGS.headless)
    env.launch()

    # Add the camera to the scene
    cam_placeholder = Dummy('cam_cinematic_placeholder')
    cam = VisionSensor.create(FLAGS.camera_resolution)
    cam.set_pose(cam_placeholder.get_pose())
    cam.set_parent(cam_placeholder)

    cam_motion = CircleCameraMotion(cam, Dummy('cam_cinematic_base'), 0.005)
    tr = TaskRecorder(env, cam_motion, fps=30)

    if len(FLAGS.tasks) > 0:
        task_names = FLAGS.tasks
    else:
        task_names = [t.split('.py')[0] for t in os.listdir(TASKS_PATH)
                      if t != '__init__.py' and t.endswith('.py')]
    task_classes = [task_file_to_task_class(
        task_file) for task_file in task_names]

    for name, cls in zip(task_names, task_classes):
        good = tr.record_task(cls)
        if FLAGS.individual and good:
            tr.save(os.path.join(FLAGS.save_dir, '%s.avi' % name))

    if not FLAGS.individual:
        tr.save(os.path.join(FLAGS.save_dir, 'recorded_tasks.avi'))
    env.shutdown()
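
The script relies on absl-py FLAGS defined elsewhere in the file. A hedged sketch of what those definitions might look like, with names taken from the usage above and defaults that are purely illustrative assumptions:

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_bool('domain_randomization', False,
                  'Enable visual domain randomization.')
flags.DEFINE_string('textures_path', '/tmp/textures',
                    'Directory of randomization textures.')
flags.DEFINE_bool('headless', True, 'Run the simulator without a GUI.')
flags.DEFINE_multi_integer('camera_resolution', [1280, 720],
                           'Recording resolution.')
flags.DEFINE_list('tasks', [], 'Task names to record; empty records all.')
flags.DEFINE_bool('individual', True, 'Save one video per task.')
flags.DEFINE_string('save_dir', '/tmp/recordings', 'Output directory.')

if __name__ == '__main__':
    app.run(main)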
Example #8
    def __init__(self, machine, camera, state="state"):
        super(CustomEnv, self).__init__()
        # Define the action and observation spaces.
        # They must be gym.spaces objects.
        self.machine = machine
        self.camera = camera
        self.action_space = spaces.Discrete(N_ACTIONS)
        if state == "state":
            self.observation_space = spaces.Box(low=-10,
                                                high=10,
                                                shape=(1, 15))
        elif state == "vision":
            self.observation_space = spaces.Box(low=0,
                                                high=1,
                                                shape=(128, 128, 3))
        self.min_force = 1
        cam_placeholder = Dummy('cam_cinematic_placeholder')
        self._gym_cam = VisionSensor.create([640, 360])
        self._gym_cam.set_pose(cam_placeholder.get_pose())
        self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
        # self._gym_cam.set_render_mode(RenderMode.OPENGL3)
        self.state_rep = state
Example #9
    def render(self, mode='human'):
        # TODO: make render available at any time
        if self._render_mode is None:
            self._render_mode = mode
            # Add the camera to the scene
            cam_placeholder = Dummy('cam_cinematic_placeholder')
            self._gym_cam = VisionSensor.create([640, 360])
            self._gym_cam.set_pose(cam_placeholder.get_pose())
            if mode == 'human':
                self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
            else:
                self._gym_cam.set_render_mode(RenderMode.OPENGL3)

        if mode != self._render_mode:
            raise ValueError(
                'The render mode must match the render mode selected in the '
                'constructor. \nI.e. if you want "human" render mode, then '
                'create the env by calling: '
                'gym.make("reach_target-state-v0", render_mode="human").\n'
                'You passed in mode %s, but expected %s.' %
                (mode, self._render_mode))
        if mode == 'rgb_array':
            return self._gym_cam.capture_rgb()
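
Per the error message above, the render mode is fixed at construction time. A usage sketch consistent with that message (the environment id comes from the message itself; capture_rgb() returns a float RGB array in [0, 1]):

import gym
import rlbench.gym  # registers the RLBench gym environments

env = gym.make('reach_target-state-v0', render_mode='rgb_array')
env.reset()
frame = env.render(mode='rgb_array')  # float array, shape (360, 640, 3)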
Example #10
    def __init__(self, task_class, observation_mode='state',
                 randomization_mode="none", rand_config=None, img_size=256,
                 special_start=[], fixed_grip=-1, force_randomly_place=False,
                 force_change_position=False, sparse=False, not_special_p=0,
                 ground_p=0, special_is_grip=False, altview=False,
                 procedural_ind=0, procedural_mode='same', procedural_set=[],
                 is_mesh_obs=False, blank=False, COLOR_TUPLE=[1, 0, 0]):
        # blank: feed the agent all-zero observations.
        self.blank = blank
        # altview: whether to add a second camera angle. True/False, or
        # "both" to concatenate the observations from both views.
        self.altview = altview
        self.img_size = img_size
        self.sparse = sparse
        self.task_class = task_class
        self._observation_mode = observation_mode
        self._randomization_mode = randomization_mode
        # special_start: a list of actions to take at the beginning.
        self.special_start = special_start
        # fixed_grip: temporary hack to keep the gripper in a fixed state.
        # 0 for fixed closed, 0.1 for fixed open, -1 for not fixed.
        self.fixed_grip = fixed_grip
        # Force the task to be randomly placed.
        self.force_randomly_place = force_randomly_place
        # Force the task to change position in addition to rotation.
        self.force_change_position = force_change_position

        obs_config = ObservationConfig()
        if observation_mode == 'state':
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        elif observation_mode in ('vision', 'visiondepth', 'visiondepthmask'):
            # obs_config.set_all(True)
            obs_config.set_all_high_dim(False)
            obs_config.set_all_low_dim(True)
        else:
            raise ValueError(
                'Unrecognised observation_mode: %s.' % observation_mode)

        action_mode = ActionMode(ArmActionMode.DELTA_EE_POSE_PLAN_NOQ)
        print("using delta pose pan")

        if randomization_mode == "random":
            objs = ['target', 'boundary', 'Floor', 'Roof', 'Wall1', 'Wall2',
                    'Wall3', 'Wall4', 'diningTable_visible']
            if rand_config is None:
                raise ValueError('rand_config must be provided when '
                                 'randomization_mode is "random".')
            self.env = DomainRandomizationEnvironment(
                action_mode, obs_config=obs_config, headless=True,
                randomize_every=RandomizeEvery.EPISODE, frequency=1,
                visual_randomization_config=rand_config
            )
        else:
            self.env = Environment(
                action_mode, obs_config=obs_config, headless=True
            )
        self.env.launch()
        self.task = self.env.get_task(task_class)

        # Probability that the pick-and-lift task starts the gripper off at a
        # certain location (arguably better named non_special_p).
        self.task._task.not_special_p = not_special_p
        # Probability that the goal is on the ground.
        self.task._task.ground_p = ground_p
        # For the "special" case, whether to grip the object or just hover
        # above it.
        self.task._task.special_is_grip = special_is_grip
        # For the procedural env.
        self.task._task.procedural_ind = procedural_ind
        # Procedural mode: same, increase, or random.
        self.task._task.procedural_mode = procedural_mode
        # Ideally a list-like object; dictates the indices to sample from
        # each episode.
        self.task._task.procedural_set = procedural_set
        # Whether the state observation is a mesh observation.
        self.task._task.is_mesh_obs = is_mesh_obs

        self.task._task.sparse = sparse
        self.task._task.COLOR_TUPLE = COLOR_TUPLE

        _, obs = self.task.reset()

        # Custom, hard-coded front camera pose.
        cam_pose = [1.0, 0.0, 1.5,
                    0.65328157, -0.65328145, -0.27059814, 0.27059814]
        self.frontcam = VisionSensor.create([img_size, img_size])
        self.frontcam.set_pose(cam_pose)

        self.frontcam.set_render_mode(RenderMode.OPENGL)
        self.frontcam.set_perspective_angle(60)
        self.frontcam.set_explicit_handling(1)

        self.frontcam_mask = VisionSensor.create([img_size, img_size])
        self.frontcam_mask.set_pose(cam_pose)
        self.frontcam_mask.set_render_mode(RenderMode.OPENGL_COLOR_CODED)
        self.frontcam_mask.set_perspective_angle(60)
        self.frontcam_mask.set_explicit_handling(1)

        if altview:
            alt_pose = [0.25, -0.65, 1.5, 0, 0.93879825, 0.34169483, 0]
            self.altcam = VisionSensor.create([img_size, img_size])
            self.altcam.set_pose(alt_pose)
            self.altcam.set_render_mode(RenderMode.OPENGL)
            self.altcam.set_perspective_angle(60)
            self.altcam.set_explicit_handling(1)
            
            self.altcam_mask = VisionSensor.create([img_size, img_size])
            self.altcam_mask.set_pose(alt_pose)
            self.altcam_mask.set_render_mode(RenderMode.OPENGL_COLOR_CODED)
            self.altcam_mask.set_perspective_angle(60)
            self.altcam_mask.set_explicit_handling(1)

        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=(action_mode.action_size,))

        if observation_mode == 'state':
            self.observation_space = spaces.Dict({
                "observation": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_state_obs().shape),
                "achieved_goal": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_achieved_goal().shape),
                "desired_goal": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=self.task._task.get_desired_goal().shape),
            })
        # Use the front vision cam.
        elif observation_mode == 'vision':
            self.frontcam.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                "observation": spaces.Box(
                    low=0, high=1,
                    shape=self.frontcam.capture_rgb().transpose(
                        2, 0, 1).flatten().shape),
            })
            if altview == "both":
                example = self.frontcam.capture_rgb().transpose(2, 0, 1).flatten()
                self.observation_space = spaces.Dict({
                    "state": spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=obs.get_low_dim_data().shape),
                    "observation": spaces.Box(
                        low=0, high=1,
                        shape=np.array([example, example]).shape),
                })
        elif observation_mode == 'visiondepth':
            self.frontcam.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                # The shape is left unflattened, since this is primarily used
                # for dataset creation rather than for training.
                "observation": spaces.Box(
                    low=0, high=1,
                    shape=np.array([
                        self.frontcam.capture_rgb().transpose(2, 0, 1),
                        self.frontcam.capture_depth()[None, ...]]).shape),
            })
        elif observation_mode == 'visiondepthmask':
            self.frontcam.handle_explicitly()
            self.frontcam_mask.handle_explicitly()
            self.observation_space = spaces.Dict({
                "state": spaces.Box(
                    low=-np.inf, high=np.inf,
                    shape=obs.get_low_dim_data().shape),
                # The shape is left unflattened, since this is primarily used
                # for dataset creation rather than for training.
                "observation": spaces.Box(
                    low=0, high=1,
                    shape=np.array([
                        self.frontcam.capture_rgb().transpose(2, 0, 1),
                        self.frontcam.capture_depth()[None, ...],
                        rgb_handles_to_mask(
                            self.frontcam_mask.capture_rgb())]).shape),
            })
            if altview == "both":
                self.observation_space = spaces.Dict({
                    "state": spaces.Box(
                        low=-np.inf, high=np.inf,
                        shape=obs.get_low_dim_data().shape),
                    "observation": spaces.Box(
                        low=0, high=1,
                        shape=np.array([
                            self.frontcam.capture_rgb().transpose(2, 0, 1),
                            self.frontcam.capture_rgb().transpose(2, 0, 1),
                            self.frontcam.capture_depth()[None, ...],
                            rgb_handles_to_mask(
                                self.frontcam_mask.capture_rgb())]).shape),
                })

        self._gym_cam = None
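
For completeness, the names in this example map onto older RLBench/PyRep modules. A hedged sketch of the imports it likely assumes; module paths vary across RLBench versions, and DELTA_EE_POSE_PLAN_NOQ looks like a fork-specific action mode, so treat these as assumptions:

import numpy as np
from gym import spaces
from pyrep.const import RenderMode
from pyrep.objects.vision_sensor import VisionSensor
from rlbench.environment import Environment
from rlbench.action_modes import ActionMode, ArmActionMode
from rlbench.observation_config import ObservationConfig
from rlbench.backend.utils import rgb_handles_to_mask
from rlbench.sim2real.domain_randomization import RandomizeEvery
from rlbench.sim2real.domain_randomization_environment import \
    DomainRandomizationEnvironment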