Example #1
    def __init__(
        self,
        action_space: gym.spaces.Discrete,
        observation_space: SpaceDict,
        num_objects: int,
        num_colors: int,
        num_states: int,
        object_embedding_dim: int = 8,
        **kwargs,
    ):
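        # prepare_locals_for_super filters `self` (and similar entries) out of
        # locals() so the captured arguments can be forwarded to the parent
        # __init__ as keyword arguments.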
        super().__init__(**prepare_locals_for_super(locals()))

        agent_view_x, agent_view_y, view_channels = observation_space[
            "minigrid_ego_image"].shape
        self.actor_critic = LinearActorCritic(
            self.ac_key,
            action_space=action_space,
            observation_space=SpaceDict({
                self.ac_key:
                gym.spaces.Box(
                    low=np.float32(-1.0),
                    high=np.float32(1.0),
                    shape=(self.object_embedding_dim * agent_view_x *
                           agent_view_y * view_channels, ),
                )
            }),
        )
        self.memory_key = None

        self.train()
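Usage sketch (not part of the source): assuming the enclosing class is a MiniGrid actor-critic model, here given the hypothetical name MiniGridLinearACModel, it could be constructed like this. The 7x7x3 view shape and the 11/6/3 object/color/state counts are MiniGrid's usual values.

import gym
import numpy as np
from gym.spaces import Dict as SpaceDict

# Ego-centric MiniGrid view, as a "minigrid_ego_image" sensor typically produces.
observation_space = SpaceDict({
    "minigrid_ego_image": gym.spaces.Box(
        low=0, high=255, shape=(7, 7, 3), dtype=np.uint8)
})
model = MiniGridLinearACModel(  # hypothetical name for the class defined above
    action_space=gym.spaces.Discrete(7),
    observation_space=observation_space,
    num_objects=11,
    num_colors=6,
    num_states=3,
)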
Example #2
    def __init__(self, sensors: List[Sensor]) -> None:
        self.sensors = OrderedDict()
        spaces: OrderedDict[str, Space] = OrderedDict()
        for sensor in sensors:
            assert (
                sensor.uuid not in self.sensors
            ), "'{}' is a duplicated sensor uuid".format(sensor.uuid)
            self.sensors[sensor.uuid] = sensor
            spaces[sensor.uuid] = sensor.observation_space
        self.observation_spaces = SpaceDict(spaces=spaces)
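A minimal usage sketch, assuming the enclosing class is a sensor suite (as in habitat's SensorSuite); RGBSensor, DepthSensor, and the "rgb" uuid are stand-ins for whatever concrete Sensor subclasses you have:

suite = SensorSuite([RGBSensor(), DepthSensor()])  # hypothetical sensor classes
rgb_space = suite.observation_spaces.spaces["rgb"]  # SpaceDict keyed by each sensor's uuid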
Example #3
    def __init__(
        self,
        input_uuids: List[str],
        output_uuid: str,
        input_height: int,
        input_width: int,
        max_dets: int,
        detector_spatial_res: int,
        detector_thres: float,
        device: Optional[torch.device] = None,
        device_ids: Optional[List[torch.device]] = None,
        **kwargs: Any,
    ):
        self.input_height = input_height
        self.input_width = input_width
        self.max_dets = max_dets
        self.detector_spatial_res = detector_spatial_res
        self.detector_thres = detector_thres
        self.device = torch.device("cpu") if device is None else device
        self.device_ids = device_ids or cast(
            List[torch.device], list(range(torch.cuda.device_count())))

        self.frcnn: BatchedFasterRCNN = BatchedFasterRCNN(
            thres=self.detector_thres,
            maxdets=self.max_dets,
            res=self.detector_spatial_res,
        )

        spaces: OrderedDict[str, gym.Space] = OrderedDict()
        shape = (self.max_dets, self.detector_spatial_res,
                 self.detector_spatial_res)
        spaces["frcnn_classes"] = gym.spaces.Box(
            low=0,  # 0 is bg
            high=len(self.COCO_INSTANCE_CATEGORY_NAMES) - 1,
            shape=shape,
            dtype=np.int64,
        )
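        # 5 channels per detection: box parameters plus a score (an assumption
        # about how BatchedFasterRCNN packs its output).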
        shape = (
            self.max_dets * 5,
            self.detector_spatial_res,
            self.detector_spatial_res,
        )
        spaces["frcnn_boxes"] = gym.spaces.Box(low=-np.inf,
                                               high=np.inf,
                                               shape=shape)

        assert (
            len(input_uuids) == 1
        ), "fasterrcnn preprocessor can only consume one observation type"

        observation_space = SpaceDict(spaces=spaces)

        super().__init__(**prepare_locals_for_super(locals()))
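Usage sketch (class name and values are illustrative): with max_dets=3 and detector_spatial_res=7 the preprocessor would advertise a (3, 7, 7) class map and a (15, 7, 7) box tensor.

pre = FasterRCNNPreProcessor(  # stand-in name for the class defined above
    input_uuids=["rgb"],
    output_uuid="frcnn",
    input_height=224,
    input_width=224,
    max_dets=3,
    detector_spatial_res=7,
    detector_thres=0.5,
)
# The resulting observation space would then contain:
#   frcnn_classes: Box(shape=(3, 7, 7), dtype=int64)  -- one class id per grid cell, 0 = background
#   frcnn_boxes:   Box(shape=(15, 7, 7))              -- 3 detections x 5 channels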
Example #4
    def __init__(
        self,
        input_uuid: str,
        action_space: gym.spaces.Discrete,
        observation_space: SpaceDict,
        hidden_size: int = 128,
        num_layers: int = 1,
        rnn_type: str = "GRU",
        head_type: Callable[
            ..., ActorCriticModel[CategoricalDistr]] = LinearActorCritic,
    ):
        super().__init__(action_space=action_space,
                         observation_space=observation_space)
        self.hidden_size = hidden_size
        self.rnn_type = rnn_type

        assert (
            input_uuid in observation_space.spaces
        ), "RNNActorCritic expects only a single observational input."
        self.input_uuid = input_uuid

        box_space: gym.spaces.Box = observation_space[self.input_uuid]
        assert isinstance(box_space, gym.spaces.Box), (
            "RNNActorCritic requires that the "
            "observation space corresponding to the input uuid is a Box space."
        )
        assert len(box_space.shape) == 1
        self.in_dim = box_space.shape[0]

        self.state_encoder = RNNStateEncoder(
            input_size=self.in_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            rnn_type=rnn_type,
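            # learn the post-reset hidden state instead of zero-initializing it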
            trainable_masked_hidden_state=True,
        )

        self.head_uuid = "{}_{}".format("rnn", input_uuid)

        self.ac_nonrecurrent_head: ActorCriticModel[
            CategoricalDistr] = head_type(
                input_uuid=self.head_uuid,
                action_space=action_space,
                observation_space=SpaceDict({
                    self.head_uuid:
                    gym.spaces.Box(low=np.float32(0.0),
                                   high=np.float32(1.0),
                                   shape=(hidden_size, ))
                }),
            )

        self.memory_key = "rnn"
Example #5
    def __init__(
        self,
        action_space: gym.spaces.Discrete,
        observation_space: SpaceDict,
        num_objects: int,
        num_colors: int,
        num_states: int,
        object_embedding_dim: int = 8,
        hidden_size=512,
        num_layers=1,
        rnn_type="GRU",
        head_type: Callable[
            ..., ActorCriticModel[CategoricalDistr]
        ] = LinearActorCritic,
        **kwargs,
    ):
        super().__init__(**prepare_locals_for_super(locals()))

        self._hidden_size = hidden_size
        agent_view_x, agent_view_y, view_channels = observation_space[
            "minigrid_ego_image"
        ].shape
        self.actor_critic = RNNActorCritic(
            input_uuid=self.ac_key,
            action_space=action_space,
            observation_space=SpaceDict(
                {
                    self.ac_key: gym.spaces.Box(
                        low=np.float32(-1.0),
                        high=np.float32(1.0),
                        shape=(
                            self.object_embedding_dim
                            * agent_view_x
                            * agent_view_y
                            * view_channels,
                        ),
                    )
                }
            ),
            hidden_size=hidden_size,
            num_layers=num_layers,
            rnn_type=rnn_type,
            head_type=head_type,
        )
        self.memory_key = "rnn"

        self.train()
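This is the recurrent counterpart of Example #1: the flattened object embeddings feed an RNNActorCritic instead of a LinearActorCritic, and memory_key is set so the agent's rollout storage keeps the "rnn" hidden state. A construction sketch, again with a hypothetical class name:

model = MiniGridRNNACModel(  # hypothetical name for the class defined above
    action_space=gym.spaces.Discrete(7),
    observation_space=observation_space,  # same "minigrid_ego_image" space as in Example #1
    num_objects=11,
    num_colors=6,
    num_states=3,
    hidden_size=256,
    rnn_type="LSTM",
)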
Example #6
    def __init__(self,
                 config: Config,
                 dataset: Optional[Dataset] = None) -> None:
        assert config.is_frozen(), ("Freeze the config before creating the "
                                    "environment, use config.freeze().")
        self._config = config
        self._dataset = dataset
        self._current_episode_index = None
        if self._dataset is None and config.DATASET.TYPE:
            self._dataset = make_dataset(id_dataset=config.DATASET.TYPE,
                                         config=config.DATASET)
        self._episodes = self._dataset.episodes if self._dataset else []
        self._current_episode = None
        iter_option_dict = {
            k.lower(): v
            for k, v in config.ENVIRONMENT.ITERATOR_OPTIONS.items()
        }
        self._episode_iterator = self._dataset.get_episode_iterator(
            **iter_option_dict)

        # load the first scene if dataset is present
        if self._dataset:
            assert (len(self._dataset.episodes) >
                    0), "dataset should have non-empty episodes list"
            self._config.defrost()
            self._config.SIMULATOR.SCENE = self._dataset.episodes[0].scene_id
            self._config.freeze()

        self._sim = make_sim(id_sim=self._config.SIMULATOR.TYPE,
                             config=self._config.SIMULATOR)
        self._task = make_task(
            self._config.TASK.TYPE,
            task_config=self._config.TASK,
            sim=self._sim,
            dataset=self._dataset,
        )
        self.observation_space = SpaceDict({
            **self._sim.sensor_suite.observation_spaces.spaces,
            **self._task.sensor_suite.observation_spaces.spaces,
        })
        self.action_space = self._sim.action_space
        self._max_episode_seconds = (
            self._config.ENVIRONMENT.MAX_EPISODE_SECONDS)
        self._max_episode_steps = self._config.ENVIRONMENT.MAX_EPISODE_STEPS
        self._elapsed_steps = 0
        self._episode_start_time: Optional[float] = None
        self._episode_over = False
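This constructor matches habitat-lab's habitat.Env, whose observation space merges the simulator's and the task's sensor suites. A typical construction, assuming that API (the config path is illustrative):

import habitat

config = habitat.get_config("configs/tasks/pointnav.yaml")  # illustrative path
env = habitat.Env(config=config)
observations = env.reset()  # keys come from the merged sim + task sensor suites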
Example #7
    def __init__(
        self,
        input_uuids: List[str],
        output_uuid: str,
        input_height: int,
        input_width: int,
        max_dets: int,
        detector_spatial_res: int,
        detector_thres: float,
        parallel: bool = False,
        device: Optional[torch.device] = None,
        device_ids: Optional[List[torch.device]] = None,
        **kwargs: Any,
    ):
        self.input_height = input_height
        self.input_width = input_width
        self.max_dets = max_dets
        self.detector_spatial_res = detector_spatial_res
        self.detector_thres = detector_thres
        self.parallel = parallel
        self.device = device if device is not None else torch.device(
            "cuda" if self.parallel and torch.cuda.is_available() else "cpu")
        self.device_ids = device_ids or cast(
            List[torch.device], list(range(torch.cuda.device_count())))

        self.frcnn: Union[BatchedFasterRCNN,
                          torch.nn.DataParallel] = BatchedFasterRCNN(
                              thres=self.detector_thres,
                              maxdets=self.max_dets,
                              res=self.detector_spatial_res,
                          )

        if self.parallel:
            assert (torch.cuda.is_available()
                    ), "attempt to parallelize detector without cuda"
            get_logger().info("Distributing detector")
            self.frcnn = self.frcnn.to(torch.device("cuda"))

            self.frcnn = torch.nn.DataParallel(self.frcnn,
                                               device_ids=self.device_ids)
            get_logger().info("Detected {} devices".format(
                torch.cuda.device_count()))

        spaces: OrderedDict[str, gym.Space] = OrderedDict()
        shape = (self.max_dets, self.detector_spatial_res,
                 self.detector_spatial_res)
        spaces["frcnn_classes"] = gym.spaces.Box(
            low=0,  # 0 is bg
            high=len(self.COCO_INSTANCE_CATEGORY_NAMES) - 1,
            shape=shape,
            dtype=np.int64,
        )
        shape = (
            self.max_dets * 5,
            self.detector_spatial_res,
            self.detector_spatial_res,
        )
        spaces["frcnn_boxes"] = gym.spaces.Box(low=-np.inf,
                                               high=np.inf,
                                               shape=shape)

        assert (
            len(input_uuids) == 1
        ), "fasterrcnn preprocessor can only consume one observation type"

        observation_space = SpaceDict(spaces=spaces)

        super().__init__(**prepare_locals_for_super(locals()))
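This variant of Example #3 adds a parallel flag: when set, the detector is replicated across device_ids with torch.nn.DataParallel, so batches of frames are split across GPUs. A construction sketch with a stand-in class name:

pre = ParallelFasterRCNNPreProcessor(  # stand-in name for the class defined above
    input_uuids=["rgb"],
    output_uuid="frcnn",
    input_height=224,
    input_width=224,
    max_dets=3,
    detector_spatial_res=7,
    detector_thres=0.5,
    parallel=torch.cuda.is_available(),  # wrap BatchedFasterRCNN in DataParallel when possible
)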