def __init__(self, venv, directory, record_video_trigger, video_length=200):
        """
        # Arguments
            venv: VecEnv to wrap
            directory: Where to save videos
            record_video_trigger:
                Function that defines when to start recording.
                The function takes the current number of step,
                and returns whether we should start recording or not.
            video_length: Length of recorded video
        """

        VecEnvWrapper.__init__(self, venv)
        self.record_video_trigger = record_video_trigger
        self.video_recorder = None

        self.directory = os.path.abspath(directory)
        # makedirs(exist_ok=True) avoids the check-then-create race of
        # `if not exists: mkdir` and also creates missing parent directories
        # (plain mkdir raised FileNotFoundError when the parent was absent).
        os.makedirs(self.directory, exist_ok=True)

        # Filename pattern pieces for recorded videos; the pid keeps files
        # from parallel processes from colliding.
        self.file_prefix = "vecenv"
        self.file_infix = '{}'.format(os.getpid())
        self.step_id = 0
        self.video_length = video_length

        # Recording state: whether we are currently recording, and how many
        # frames of the current clip have been captured.
        self.recording = False
        self.recorded_frames = 0
Beispiel #2
0
    def __init__(self, venv, mode, paths, clip=10., epsilon=1e-8):
        """Wrap ``venv`` and normalize the dict-observation entries in ``paths``.

        # Arguments
            venv: VecEnv to wrap.
            mode: Normalizer statistics update unless mode is one of
                "test"/"eval"/"custom".
            paths: Keys of ``observation_space.spaces`` to normalize.
            clip: Normalized values are clipped to ``[-clip, +clip]``.
            epsilon: Numerical-stability constant for the normalizers.

        # Raises
            TypeError: If a requested entry is a Box space that is not 1d.
        """
        VecEnvWrapper.__init__(self, venv)

        # TODO: Find a more elegant way for controlling wrapper activation in environments.
        self.training = mode not in ["test", "eval", "custom"]

        self.paths = paths

        # Validate every requested entry up front: only 1d (vector) Box
        # spaces are supported — images are not handled here.
        for key in self.paths:
            space = self.observation_space.spaces[key]
            if isinstance(space, spaces.Box) and len(space.shape) != 1:
                raise TypeError(
                    "VecNormalizeObsDict supports 1d (vector) Box observation spaces. Whereas it is {}d for '{}'."
                    .format(len(space.shape), key))

        # One running normalizer per entry; advertise the clip range as the
        # space bounds since normalized outputs never leave [-clip, +clip].
        self.normalizers = {}
        for key in self.paths:
            space = self.observation_space.spaces[key]
            self.normalizers[key] = Normalizer(shape=space.shape,
                                               clip=clip,
                                               eps=epsilon)
            space.low = np.full_like(space.low, -clip)
            space.high = np.full_like(space.high, +clip)
Beispiel #3
0
    def __init__(self, venv, mode, path, nstack, axis=-1):
        """Stack the last ``nstack`` observations found at ``path`` along ``axis``.

        * ``axis=0``: use when images were already transposed (e.g. by
          :class:`~digideep.environment.wrappers.normal.WrapperTransposeImage`)
          so channels come first; this layout is PyTorch-compatible.
        * ``axis=-1``: use for untransposed ``(W,H,3)`` images, stacking on the
          trailing channel axis; this matches OpenAI's defaults.
        """
        # NOTE(review): `mode` is accepted but never read here — confirm intent.
        self.venv = venv
        self.path = path
        self.axis = axis
        self.nstack = nstack

        observation_space = venv.observation_space
        wrapped_space = observation_space.spaces[self.path]

        # Size of the stacking axis before repetition (channel count, 1 or 3).
        self.axis_dim = wrapped_space.shape[axis]
        low = np.repeat(wrapped_space.low, self.nstack, axis=self.axis)
        high = np.repeat(wrapped_space.high, self.nstack, axis=self.axis)

        # Rolling buffer holding the `nstack` most recent observations per env.
        self.stacked_obs = np.zeros((venv.num_envs, ) + low.shape,
                                    dtype=low.dtype)

        # Advertise the widened (stacked) space to downstream consumers.
        observation_space.spaces[self.path] = spaces.Box(
            low=low, high=high, dtype=wrapped_space.dtype)
        VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
Beispiel #4
0
    def __init__(self, venv, mode, clip=10., gamma=0.99, epsilon=1e-8):
        """Wrap ``venv`` with a scalar return normalizer and per-env return state.

        # Arguments
            venv: VecEnv to wrap.
            mode: Training is enabled only when mode equals "train".
            clip: Clip value handed to the Normalizer.
            gamma: Discount factor stored for return accumulation.
            epsilon: Numerical-stability constant for the Normalizer.
        """
        VecEnvWrapper.__init__(self, venv)
        self.training = (mode == "train")

        # Scalar normalizer plus one running return accumulator per env.
        self.normalizer = Normalizer(shape=(), clip=clip, eps=epsilon)
        self.ret = np.zeros(self.num_envs)
        self.gamma = gamma
Beispiel #5
0
 def __init__(self, venv):
     """Thin pass-through: initialize VecEnvWrapper with no extra state."""
     VecEnvWrapper.__init__(self, venv)
Beispiel #6
0
 def __init__(self, venv):
     """Wrap ``venv`` and replace its observation space with a flat Dict."""
     VecEnvWrapper.__init__(self, venv)
     # NOTE: This wrapper also flattens the observation space (nested observation spaces will be flattened).
     self.observation_space = spaces.Dict(
         _flatten_space(self.observation_space))
 def close(self):
     """Close the wrapped vec-env first, then shut down the video recorder."""
     VecEnvWrapper.close(self)
     self.close_video_recorder()
Beispiel #8
0
    def __init__(self,
                 venv,
                 mode,
                 session_state=None,
                 threshold=0.,
                 interval=100,
                 info_keys=None,
                 obs_keys=None,
                 num_workers=None):
        """Wrap ``venv`` and set up per-episode statistics collection.

        # Arguments
            venv: VecEnv to wrap.
            mode: Accepted for signature consistency with sibling wrappers;
                not read in this constructor.
            session_state: Optional session dict; when provided, its
                'path_session' entry becomes the output filename.
            threshold: Stored threshold value (read elsewhere in the class).
            interval: Stored reporting interval (read elsewhere in the class).
            info_keys: Root keys to track from step infos
                (defaults to ["/rand"]).
            obs_keys: Root keys to track from observations
                (defaults to ["/parameters"]).
            num_workers: Number of workers; used as the category range of the
                worker-id histogram.
        """
        VecEnvWrapper.__init__(self, venv)
        self.session_state = session_state

        # Bug fix: the previous list defaults (["/rand"], ["/parameters"])
        # were mutable default arguments shared across every instance; build
        # fresh lists per call instead (same effective defaults).
        if info_keys is None:
            info_keys = ["/rand"]
        if obs_keys is None:
            obs_keys = ["/parameters"]

        # Per-episode return/length accumulators (filled in elsewhere).
        self.eprets = None
        self.eplens = None
        self.tstart = time.time()

        # Observations are one step old, so should be the infos!
        self.infos = None
        self.obs = None

        # Output file location comes from the session, when one is given.
        if self.session_state:
            self.filename = self.session_state['path_session']
        else:
            self.filename = ""

        self.results_writer = None
        self.num_workers = num_workers

        self.threshold = threshold
        self.interval = interval

        self.key_root_infos = info_keys
        self.key_root_obs = obs_keys

        # Bug fix: with the default num_workers=None, list(range(None))
        # raised TypeError; fall back to an empty category list so the
        # default signature is actually usable.
        worker_categories = (list(range(num_workers))
                             if num_workers is not None else [])

        self.stats = {
            "episodes": 0,
            "episodes_failure": 0,
            "scatter": {
                "offset_noise_2d": [[], []],
                "controller_thre": [],
                "controller_gain": [],
                "randomized_duration": [],
                "r": [],
                "subject_id": [],
                "starting_id": []
            },
            "histogram": {
                "rotation_style":
                Histogram("Rotation Style",
                          categories=["nl", "nr", "fl", "fr"]),
                "worker_id":
                Histogram("Worker ID", categories=worker_categories)
            }
        }