Example 1
    def openable_not_pickupable_objects(self, visible_only: bool = True):
        """Return metadata for objects that are openable but not pickupable,
        optionally restricted (`visible_only`) to objects currently visible."""
        with include_object_data(self.walkthrough_env.controller):
            return [
                o
                for o in self.walkthrough_env.last_event.metadata["objects"]
                if (o["visible"] or not visible_only)
                and o["openable"]
                and not o["pickupable"]
            ]
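A hedged usage sketch (not part of the source): assuming `task` is an instance of the class defining the method above, the helper can be used to list which openable, non-pickupable objects are currently in view.

# Hypothetical usage; `task` is assumed to be an instance of the (unshown) class
# that defines `openable_not_pickupable_objects`.
openable = task.openable_not_pickupable_objects(visible_only=True)
print(sorted(o["name"] for o in openable))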
Example 2
    def query_expert(self, **kwargs) -> Tuple[Any, bool]:
        """Return `(expert_action, action_is_valid)` from the greedy expert;
        the boolean is False when the expert has no action to suggest."""
        if self.greedy_expert is None:
            if not hasattr(self.unshuffle_env, "shortest_path_navigator"):
                # TODO: This is a bit hacky
                self.unshuffle_env.shortest_path_navigator = ShortestPathNavigatorTHOR(
                    controller=self.unshuffle_env.controller,
                    grid_size=STEP_SIZE,
                    include_move_left_right=all(
                        f"move_{k}" in self.action_names()
                        for k in ["left", "right"]),
                )

            self.greedy_expert = GreedyUnshuffleExpert(
                task=self,
                shortest_path_navigator=self.unshuffle_env.shortest_path_navigator,
            )
            if self.object_names_seen_in_walkthrough is not None:
                # The expert shouldn't act on objects the walkthrougher hasn't seen!
                c = self.unshuffle_env.controller
                with include_object_data(c):
                    for o in c.last_event.metadata["objects"]:
                        if o["name"] not in self.object_names_seen_in_walkthrough:
                            self.greedy_expert.object_name_to_priority[o["name"]] = (
                                self.greedy_expert.max_priority_per_object + 1
                            )

        action = self.greedy_expert.expert_action
        if action is None:
            return 0, False
        else:
            return action, True
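A minimal sketch of how the `(action, is_valid)` pair returned by `query_expert` might be consumed, for example to build imitation-learning labels; `task` and the masking convention are assumptions, not code from the source.

# Hypothetical consumer of `query_expert`; `task` is assumed to expose both
# `query_expert()` and `action_names()` as above.
expert_action_index, is_valid = task.query_expert()
expert_label = task.action_names()[expert_action_index] if is_valid else None
# `expert_label` is None whenever the greedy expert has no suggestion.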
    def _generate_expert_action_dict(self) -> Dict[str, Any]:
        """Generate a dictionary describing the next greedy expert action."""
        env = self.task.unshuffle_env

        if env.mode != RearrangeMode.SNAP:
            raise NotImplementedError(
                f"Expert only defined for 'easy' mode (current mode: {env.mode})."
            )

        held_object = env.held_object

        agent_loc = env.get_agent_location()

        if held_object is not None:
            self._last_to_interact_object_pose = None

            # Should navigate to a position where the held object can be placed
            expert_nav_action = self._expert_nav_action_to_obj(
                obj={
                    **held_object,
                    **{
                        k: env.obj_name_to_walkthrough_start_pose[held_object["name"]][k]
                        for k in ["position", "rotation"]
                    },
                },
            )

            if expert_nav_action is None:
                # Could not find a path to the target, let's just immediately drop the held object
                return dict(action="DropHeldObjectWithSnap")
            elif expert_nav_action == "Pass":
                # We are in a position where we can drop the object, let's do that
                return dict(action="DropHeldObjectWithSnap")
            else:
                return dict(action=expert_nav_action)
        else:
            _, goal_poses, cur_poses = env.poses

            assert len(goal_poses) == len(cur_poses)

            failed_places_and_min_dist = (float("inf"), float("inf"))
            obj_pose_to_go_to = None
            goal_obj_pos = None
            for gp, cp in zip(goal_poses, cur_poses):
                if (
                    not gp["broken"]
                    and not cp["broken"]
                    and self.object_name_to_priority[gp["name"]]
                    <= self.max_priority_per_object
                    and not RearrangeTHOREnvironment.are_poses_equal(gp, cp)
                ):
                    priority = self.object_name_to_priority[gp["name"]]
                    priority_and_dist_to_object = (
                        priority,
                        IThorEnvironment.position_dist(
                            agent_loc, gp["position"], ignore_y=True, l1_dist=True
                        ),
                    )
                    if (
                        self._last_to_interact_object_pose is not None
                        and self._last_to_interact_object_pose["name"] == gp["name"]
                    ):
                        # Set distance to -1 for the currently targeted object
                        priority_and_dist_to_object = (
                            priority_and_dist_to_object[0],
                            -1,
                        )

                    if priority_and_dist_to_object < failed_places_and_min_dist:
                        failed_places_and_min_dist = priority_and_dist_to_object
                        obj_pose_to_go_to = cp
                        goal_obj_pos = gp

            self._last_to_interact_object_pose = obj_pose_to_go_to

            if obj_pose_to_go_to is None:
                # There are no objects we need to change
                return dict(action="Done")

            expert_nav_action = self._expert_nav_action_to_obj(
                obj=obj_pose_to_go_to)
            if expert_nav_action is None:
                interactable_positions = self.task.env._interactable_positions_cache.get(
                    scene_name=env.scene,
                    obj=obj_pose_to_go_to,
                    controller=env.controller,
                )
                if len(interactable_positions) != 0:
                    # Could not find a path to the object; bump its priority (so that
                    # other objects are tried first) and generate a new action.
                    get_logger().debug(
                        f"Could not find a path to {obj_pose_to_go_to}"
                        f" in scene {self.task.unshuffle_env.scene}"
                        f" when at position {self.task.unshuffle_env.get_agent_location()}."
                    )
                else:
                    get_logger().debug(
                        f"Object {obj_pose_to_go_to} in scene {self.task.unshuffle_env.scene}"
                        f" has no interactable positions.")
                self.object_name_to_priority[obj_pose_to_go_to["name"]] += 1
                return self._generate_expert_action_dict()
            elif expert_nav_action == "Pass":
                with include_object_data(env.controller):
                    visible_objects = {
                        o["name"]
                        for o in env.last_event.metadata["objects"]
                        if o["visible"]
                    }

                if obj_pose_to_go_to["name"] not in visible_objects:
                    if self._invalidate_interactable_loc_for_pose(
                            location=agent_loc, obj_pose=obj_pose_to_go_to):
                        return self._generate_expert_action_dict()

                    raise RuntimeError("This should not be possible.")

                # The object of interest is interactable at the moment
                if (
                    obj_pose_to_go_to["openness"] is not None
                    and obj_pose_to_go_to["openness"] != goal_obj_pos["openness"]
                ):
                    return dict(
                        action="OpenByType",
                        objectId=obj_pose_to_go_to["objectId"],
                        openness=goal_obj_pos["openness"],
                    )
                elif obj_pose_to_go_to["pickupable"]:
                    return dict(
                        action="Pickup",
                        objectId=obj_pose_to_go_to["objectId"],
                    )
                else:
                    # We (likely) have an openable object which has been moved somehow but is not
                    # pickupable. We don't know what to do with such an object, so we set its
                    # priority above the maximum and try again.
                    get_logger().warning(
                        f"{obj_pose_to_go_to['name']} has moved but is not pickupable."
                    )
                    self.object_name_to_priority[goal_obj_pos["name"]] = (
                        self.max_priority_per_object + 1)
                    return self._generate_expert_action_dict()
            else:
                # If we are not looking at the object to change, then we should navigate to it
                return dict(action=expert_nav_action)
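The expert above ranks candidate objects by a `(priority, distance)` tuple and bumps an object's priority whenever navigation or interaction fails, so it is retried only after other objects. A self-contained sketch of that ranking, using made-up object names and distances:

# Standalone illustration of the (priority, distance) ranking; all names and
# numbers are invented for the example.
candidates = [
    {"name": "Mug_1", "priority": 0, "distance": 3.5},
    {"name": "Drawer_2", "priority": 1, "distance": 0.5},
    {"name": "Book_3", "priority": 0, "distance": 1.25},
]
# Lower priority wins first and distance breaks ties, mirroring the tuple
# comparison `priority_and_dist_to_object < failed_places_and_min_dist`.
best = min(candidates, key=lambda c: (c["priority"], c["distance"]))
assert best["name"] == "Book_3"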
Example 4
    def _step(self, action: int) -> RLStepResult:
        """
        action : is the index of the action from self.action_names()
        """
        # parse the action data
        action_name = self.action_names()[action]

        if action_name.startswith("pickup"):
            # NOTE: Object ids are excluded from the metadata for speed, so they cannot
            # be targeted with interactable actions directly. Hence we reset the object
            # filter (via `include_object_data`) before targeting by object id.

            with include_object_data(self.unshuffle_env.controller):
                metadata = self.unshuffle_env.last_event.metadata

                if len(metadata["inventoryObjects"]) != 0:
                    action_success = False
                else:
                    object_type = stringcase.pascalcase(
                        action_name.replace("pickup_", ""))
                    possible_objects = [
                        o for o in metadata["objects"]
                        if o["visible"] and o["objectType"] == object_type
                    ]

                    possible_objects = sorted(
                        possible_objects,
                        key=lambda po: (po["distance"], po["name"]),
                    )
                    object_before = None
                    if len(possible_objects) > 0:
                        object_before = possible_objects[0]
                        object_id = object_before["objectId"]

                    if object_before is not None:
                        self.unshuffle_env.controller.step(
                            "PickupObject",
                            objectId=object_id,
                            **self.unshuffle_env.physics_step_kwargs,
                        )
                        action_success = self.unshuffle_env.last_event.metadata[
                            "lastActionSuccess"]
                    else:
                        action_success = False

                    if action_success and self.unshuffle_env.held_object is None:
                        get_logger().warning(
                            f"`PickupObject` was successful in picking up {object_id} but we're not holding"
                            f" any objects! Current task spec:\n{self.unshuffle_env.current_task_spec}."
                        )
                        action_success = False

        elif action_name.startswith("open_by_type"):
            object_type = stringcase.pascalcase(
                action_name.replace("open_by_type_", ""))
            with include_object_data(self.unshuffle_env.controller):

                obj_name_to_goal_and_cur_poses = {
                    cur_pose["name"]: (goal_pose, cur_pose)
                    for _, goal_pose, cur_pose in zip(
                        *self.unshuffle_env.poses)
                }

                goal_pose = None
                cur_pose = None
                for o in self.unshuffle_env.last_event.metadata["objects"]:
                    if (o["visible"] and o["objectType"] == object_type
                            and o["openable"]
                            and not self.unshuffle_env.are_poses_equal(
                                *obj_name_to_goal_and_cur_poses[o["name"]])):
                        goal_pose, cur_pose = obj_name_to_goal_and_cur_poses[
                            o["name"]]
                        break

                if goal_pose is not None:
                    object_id = cur_pose["objectId"]
                    goal_openness = goal_pose["openness"]

                    if cur_pose["openness"] > 0.0:
                        self.unshuffle_env.controller.step(
                            "CloseObject",
                            objectId=object_id,
                            **self.unshuffle_env.physics_step_kwargs,
                        )

                    self.unshuffle_env.controller.step(
                        "OpenObject",
                        objectId=object_id,
                        openness=goal_openness,
                        **self.unshuffle_env.physics_step_kwargs,
                    )
                    action_success = self.unshuffle_env.last_event.metadata[
                        "lastActionSuccess"]
                else:
                    action_success = False

        elif action_name.startswith(
            ("move", "rotate", "look", "stand", "crouch")
        ):
            # Apply only to the unshuffle env, as the walkthrough agent's position
            # must now be managed by whichever sensor is trying to read data from it.
            action_success = getattr(self.unshuffle_env, action_name)()
        elif action_name == "drop_held_object_with_snap":
            action_success = getattr(self.unshuffle_env, action_name)()
        elif action_name == "done":
            self._took_end_action = True
            action_success = True
        elif action_name == "pass":
            action_success = True
        else:
            raise RuntimeError(
                f"Action '{action_name}' is not in the action space {RearrangeActionSpace}"
            )

        self.actions_taken.append(action_name)
        self.actions_taken_success.append(action_success)
        if self.task_spec_in_metrics:
            self.agent_locs.append(self.unshuffle_env.get_agent_location())
        return RLStepResult(
            observation=None,
            reward=self._judge(),
            done=self.is_done(),
            info={
                "action_name": action_name,
                "action_success": action_success
            },
        )
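A hedged sketch of how `_step` might be exercised from the outside, assuming an AllenAct-style task where the public `step(action)` wraps `_step` and `is_done()` signals termination; the random policy is purely illustrative.

import random

# Hypothetical rollout loop; `task` is assumed to expose `step`, `is_done`,
# and `action_names` as in AllenAct-style tasks.
while not task.is_done():
    action = random.randrange(len(task.action_names()))
    result = task.step(action)
    print(result.info["action_name"], result.info["action_success"], result.reward)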