def rearrangement_datagen_worker(
    input_queue: mp.Queue,
    output_queue: mp.Queue,
    scene_to_obj_name_to_avoid_positions: Optional[
        Dict[str, Dict[str, np.ndarray]]
    ] = None,
):
    ptitle("Rearrange Datagen Worker")

    env = RearrangeTHOREnvironment(
        force_cache_reset=True,
        controller_kwargs={"commit_id": THOR_COMMIT_ID},
    )

    while True:
        try:
            scene, stage, seed = input_queue.get(timeout=2)
        except queue.Empty:
            break

        data = generate_rearrangements_for_scenes(
            stage_seed=seed,
            stage_scenes=[scene],
            env=env,
            object_types_to_not_move=OBJECT_TYPES_TO_NOT_MOVE,
            obj_name_to_avoid_positions=None
            if scene_to_obj_name_to_avoid_positions is None
            else scene_to_obj_name_to_avoid_positions[scene],
        )
        output_queue.put((scene, stage, data[scene]))
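# Illustrative only: a minimal sketch of one way the worker above might be driven
# from a parent process. The function name, worker count, and the
# (scene, stage, seed) job layout are assumptions for this example, not part of
# this module. Call it under an `if __name__ == "__main__":` guard if the
# multiprocessing start method is "spawn".
def example_launch_datagen_workers(scenes, stage="train", stage_seed=0, num_workers=4):
    input_queue: mp.Queue = mp.Queue()
    output_queue: mp.Queue = mp.Queue()

    # Enqueue one (scene, stage, seed) job per scene; workers exit once the queue drains.
    for scene in scenes:
        input_queue.put((scene, stage, stage_seed))

    processes = [
        mp.Process(
            target=rearrangement_datagen_worker,
            kwargs=dict(input_queue=input_queue, output_queue=output_queue),
        )
        for _ in range(num_workers)
    ]
    for p in processes:
        p.start()

    # Collect one result per enqueued job before joining.
    scene_to_data = {}
    for _ in range(len(scenes)):
        scene, _stage, data = output_queue.get()
        scene_to_data[scene] = data

    for p in processes:
        p.join()
    return scene_to_data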
def _generate_expert_action_dict(self) -> Dict[str, Any]:
    """Generate a dictionary describing the next greedy expert action."""
    env = self.task.unshuffle_env

    if env.mode != RearrangeMode.SNAP:
        raise NotImplementedError(
            f"Expert only defined for 'easy' mode (current mode: {env.mode})."
        )

    held_object = env.held_object
    agent_loc = env.get_agent_location()

    if held_object is not None:
        self._last_to_interact_object_pose = None

        # Should navigate to a position where the held object can be placed.
        expert_nav_action = self._expert_nav_action_to_obj(
            obj={
                **held_object,
                **{
                    k: env.obj_name_to_walkthrough_start_pose[held_object["name"]][k]
                    for k in ["position", "rotation"]
                },
            },
        )

        if expert_nav_action is None:
            # Could not find a path to the target, let's just immediately drop the held object.
            return dict(action="DropHeldObjectWithSnap")
        elif expert_nav_action == "Pass":
            # We are in a position where we can drop the object, let's do that.
            return dict(action="DropHeldObjectWithSnap")
        else:
            return dict(action=expert_nav_action)
    else:
        _, goal_poses, cur_poses = env.poses

        assert len(goal_poses) == len(cur_poses)

        failed_places_and_min_dist = (float("inf"), float("inf"))
        obj_pose_to_go_to = None
        goal_obj_pos = None
        for gp, cp in zip(goal_poses, cur_poses):
            if (
                (gp["broken"] == cp["broken"] == False)
                and self.object_name_to_priority[gp["name"]]
                <= self.max_priority_per_object
                and not RearrangeTHOREnvironment.are_poses_equal(gp, cp)
            ):
                priority = self.object_name_to_priority[gp["name"]]
                priority_and_dist_to_object = (
                    priority,
                    IThorEnvironment.position_dist(
                        agent_loc, gp["position"], ignore_y=True, l1_dist=True
                    ),
                )
                if (
                    self._last_to_interact_object_pose is not None
                    and self._last_to_interact_object_pose["name"] == gp["name"]
                ):
                    # Set distance to -1 for the currently targeted object.
                    priority_and_dist_to_object = (
                        priority_and_dist_to_object[0],
                        -1,
                    )

                if priority_and_dist_to_object < failed_places_and_min_dist:
                    failed_places_and_min_dist = priority_and_dist_to_object
                    obj_pose_to_go_to = cp
                    goal_obj_pos = gp

        self._last_to_interact_object_pose = obj_pose_to_go_to

        if obj_pose_to_go_to is None:
            # There are no objects we need to change.
            return dict(action="Done")

        expert_nav_action = self._expert_nav_action_to_obj(obj=obj_pose_to_go_to)

        if expert_nav_action is None:
            interactable_positions = self.task.env._interactable_positions_cache.get(
                scene_name=env.scene,
                obj=obj_pose_to_go_to,
                controller=env.controller,
            )
            if len(interactable_positions) != 0:
                # Could not find a path to the object, increment the place count of the object and
                # try generating a new action.
                get_logger().debug(
                    f"Could not find a path to {obj_pose_to_go_to}"
                    f" in scene {self.task.unshuffle_env.scene}"
                    f" when at position {self.task.unshuffle_env.get_agent_location()}."
                )
            else:
                get_logger().debug(
                    f"Object {obj_pose_to_go_to} in scene {self.task.unshuffle_env.scene}"
                    f" has no interactable positions."
                )
            self.object_name_to_priority[obj_pose_to_go_to["name"]] += 1
            return self._generate_expert_action_dict()
        elif expert_nav_action == "Pass":
            with include_object_data(env.controller):
                visible_objects = {
                    o["name"]
                    for o in env.last_event.metadata["objects"]
                    if o["visible"]
                }

            if obj_pose_to_go_to["name"] not in visible_objects:
                if self._invalidate_interactable_loc_for_pose(
                    location=agent_loc, obj_pose=obj_pose_to_go_to
                ):
                    return self._generate_expert_action_dict()

                raise RuntimeError("This should not be possible.")

            # The object of interest is interactable at the moment.
            if (
                obj_pose_to_go_to["openness"] is not None
                and obj_pose_to_go_to["openness"] != goal_obj_pos["openness"]
            ):
                return dict(
                    action="OpenByType",
                    objectId=obj_pose_to_go_to["objectId"],
                    openness=goal_obj_pos["openness"],
                )
            elif obj_pose_to_go_to["pickupable"]:
                return dict(
                    action="Pickup",
                    objectId=obj_pose_to_go_to["objectId"],
                )
            else:
                # We (likely) have an openable object which has been moved somehow but is not
                # pickupable. We don't know what to do with such an object so we'll set its
                # place count to a large value and try again.
                get_logger().warning(
                    f"{obj_pose_to_go_to['name']} has moved but is not pickupable."
                )
                self.object_name_to_priority[goal_obj_pos["name"]] = (
                    self.max_priority_per_object + 1
                )
                return self._generate_expert_action_dict()
        else:
            # If we are not looking at the object to change, then we should navigate to it.
            return dict(action=expert_nav_action)
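# Illustrative only: a minimal sketch of how the dictionary returned above might
# be consumed. The `expert` argument and the split into an action name plus
# keyword arguments are assumptions for this example, not an API defined here.
def example_split_expert_action(expert) -> Tuple[str, Dict[str, Any]]:
    action_dict = expert._generate_expert_action_dict()
    # e.g. "Pickup", "OpenByType", "DropHeldObjectWithSnap", "Done", or a navigation action.
    action_name = action_dict["action"]
    # Remaining entries (such as `objectId` or `openness`) parameterize the action.
    action_kwargs = {k: v for k, v in action_dict.items() if k != "action"}
    return action_name, action_kwargs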
class RearrangeTaskSampler(TaskSampler):
    def __init__(
        self,
        run_walkthrough_phase: bool,
        run_unshuffle_phase: bool,
        stage: str,
        scenes_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
        rearrange_env_kwargs: Optional[Dict[str, Any]],
        sensors: SensorSuite,
        max_steps: Union[Dict[str, int], int],
        discrete_actions: Tuple[str, ...],
        require_done_action: bool,
        force_axis_aligned_start: bool,
        epochs: Union[int, float, str] = "default",
        seed: Optional[int] = None,
        unshuffle_runs_per_walkthrough: Optional[int] = None,
        task_spec_in_metrics: bool = False,
    ) -> None:
        assert isinstance(run_walkthrough_phase, bool) and isinstance(
            run_unshuffle_phase, bool
        ), (
            f"Both `run_walkthrough_phase` (== {run_walkthrough_phase})"
            f" and `run_unshuffle_phase` (== {run_unshuffle_phase})"
            f" must be boolean valued."
        )
        assert (
            run_walkthrough_phase or run_unshuffle_phase
        ), "One of `run_walkthrough_phase` or `run_unshuffle_phase` must be `True`."

        assert (unshuffle_runs_per_walkthrough is None) or (
            run_walkthrough_phase and run_unshuffle_phase
        ), (
            "`unshuffle_runs_per_walkthrough` should be `None` if either `run_walkthrough_phase` or"
            " `run_unshuffle_phase` is `False`."
        )
        assert (
            unshuffle_runs_per_walkthrough is None
        ) or unshuffle_runs_per_walkthrough >= 1, (
            f"`unshuffle_runs_per_walkthrough` (=={unshuffle_runs_per_walkthrough}) must be >= 1."
        )

        self.run_walkthrough_phase = run_walkthrough_phase
        self.run_unshuffle_phase = run_unshuffle_phase
        self.sensors = sensors
        self.stage = stage
        self.main_seed = seed if seed is not None else random.randint(0, 2 ** 30 - 1)

        self.unshuffle_runs_per_walkthrough = (
            1
            if unshuffle_runs_per_walkthrough is None
            else unshuffle_runs_per_walkthrough
        )
        self.cur_unshuffle_runs_count = 0
        self.task_spec_in_metrics = task_spec_in_metrics

        self.scenes_to_task_spec_dicts = copy.deepcopy(scenes_to_task_spec_dicts)

        if isinstance(epochs, str):
            if epochs.lower().strip() != "default":
                raise NotImplementedError(f"Unknown value for `epochs` (=={epochs})")
            epochs = float("inf") if stage == "train" else 1
        self.task_spec_iterator = RearrangeTaskSpecIterable(
            scenes_to_task_spec_dicts=self.scenes_to_task_spec_dicts,
            seed=self.main_seed,
            epochs=epochs,
            shuffle=epochs == float("inf"),
        )

        self.walkthrough_env = RearrangeTHOREnvironment(**rearrange_env_kwargs)
        self.unshuffle_env: Optional[RearrangeTHOREnvironment] = None
        if self.run_unshuffle_phase:
            self.unshuffle_env = RearrangeTHOREnvironment(**rearrange_env_kwargs)

        self.scenes = list(self.scenes_to_task_spec_dicts.keys())

        if isinstance(max_steps, int):
            max_steps = {"unshuffle": max_steps, "walkthrough": max_steps}
        self.max_steps: Dict[str, int] = max_steps
        self.discrete_actions = discrete_actions

        self.require_done_action = require_done_action
        self.force_axis_aligned_start = force_axis_aligned_start

        self._last_sampled_task: Optional[Union[UnshuffleTask, WalkthroughTask]] = None
        self._last_sampled_walkthrough_task: Optional[WalkthroughTask] = None
        self.was_in_exploration_phase: bool = False

    @classmethod
    def from_fixed_dataset(
        cls,
        run_walkthrough_phase: bool,
        run_unshuffle_phase: bool,
        stage: str,
        allowed_scenes: Optional[Sequence[str]] = None,
        scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]] = None,
        randomize_start_rotation: bool = False,
        **init_kwargs,
    ):
        scenes_to_task_spec_dicts = cls._filter_scenes_to_task_spec_dicts(
            scenes_to_task_spec_dicts=cls.load_rearrange_data_from_path(
                stage=stage, base_dir=STARTER_DATA_DIR
            ),
            allowed_scenes=allowed_scenes,
            scene_to_allowed_rearrange_inds=scene_to_allowed_rearrange_inds,
        )
        if randomize_start_rotation:
            random_gen = random.Random(1)
            for scene in sorted(scenes_to_task_spec_dicts.keys()):
                for task_spec_dict in scenes_to_task_spec_dicts[scene]:
                    task_spec_dict["agent_rotation"] = 360.0 * random_gen.random()

        return cls(
            run_walkthrough_phase=run_walkthrough_phase,
            run_unshuffle_phase=run_unshuffle_phase,
            stage=stage,
            scenes_to_task_spec_dicts=scenes_to_task_spec_dicts,
            **init_kwargs,
        )

    @classmethod
    def from_scenes_at_runtime(
        cls,
        run_walkthrough_phase: bool,
        run_unshuffle_phase: bool,
        stage: str,
        allowed_scenes: Sequence[str],
        repeats_before_scene_change: int,
        **init_kwargs,
    ):
        assert "scene_to_allowed_rearrange_inds" not in init_kwargs
        assert repeats_before_scene_change >= 1
        return cls(
            run_walkthrough_phase=run_walkthrough_phase,
            run_unshuffle_phase=run_unshuffle_phase,
            stage=stage,
            scenes_to_task_spec_dicts={
                scene: tuple(
                    {"scene": scene, "runtime_sample": True}
                    for _ in range(repeats_before_scene_change)
                )
                for scene in allowed_scenes
            },
            **init_kwargs,
        )

    @classmethod
    def _filter_scenes_to_task_spec_dicts(
        cls,
        scenes_to_task_spec_dicts: Dict[str, List[Dict[str, Any]]],
        allowed_scenes: Optional[Sequence[str]],
        scene_to_allowed_rearrange_inds: Optional[Dict[str, Sequence[int]]],
    ) -> Dict[str, List[Dict[str, Any]]]:
        if allowed_scenes is not None:
            scenes_to_task_spec_dicts = {
                scene: scenes_to_task_spec_dicts[scene] for scene in allowed_scenes
            }

        if scene_to_allowed_rearrange_inds is not None:
            scenes_to_task_spec_dicts = {
                scene: [
                    scenes_to_task_spec_dicts[scene][ind]
                    for ind in sorted(scene_to_allowed_rearrange_inds[scene])
                ]
                for scene in scene_to_allowed_rearrange_inds
                if scene in scenes_to_task_spec_dicts
            }
        return scenes_to_task_spec_dicts

    @classmethod
    def load_rearrange_data_from_path(
        cls,
        stage: str,
        base_dir: Optional[str] = None,
    ) -> Dict[str, List[Dict[str, Any]]]:
        stage = stage.lower()

        if stage == "valid":
            stage = "val"

        data_path = os.path.abspath(os.path.join(base_dir, f"{stage}.pkl.gz"))
        if not os.path.exists(data_path):
            raise RuntimeError(f"No data at path {data_path}")

        data = compress_pickle.load(path=data_path)
        for scene in data:
            for ind, task_spec_dict in enumerate(data[scene]):
                task_spec_dict["scene"] = scene

                if "index" not in task_spec_dict:
                    task_spec_dict["index"] = ind

                if "stage" not in task_spec_dict:
                    task_spec_dict["stage"] = stage
        return data

    @property
    def length(self) -> float:
        """Return the total number of allowable next_task calls."""
        count = self.run_walkthrough_phase + self.run_unshuffle_phase
        if count == 1:
            return self.task_spec_iterator.length
        elif count == 2:
            mult = self.unshuffle_runs_per_walkthrough
            count = (1 + mult) * self.task_spec_iterator.length

            if self.last_sampled_task is not None and (
                isinstance(self.last_sampled_task, WalkthroughTask)
                or self.cur_unshuffle_runs_count < mult
            ):
                count += mult - self.cur_unshuffle_runs_count

            return count
        else:
            raise NotImplementedError

    @property
    def total_unique(self):
        return self.task_spec_iterator.total_unique

    @property
    def last_sampled_task(self) -> Optional[UnshuffleTask]:
        """Return the most recently sampled task."""
        return self._last_sampled_task

    @property
    def all_observation_spaces_equal(self) -> bool:
        """Return if the observation space remains the same across steps."""
        return True

    def close(self) -> None:
        """Close the open AI2-THOR controllers."""
        try:
            self.unshuffle_env.stop()
        except Exception as _:
            pass

        try:
            self.walkthrough_env.stop()
        except Exception as _:
            pass

    def reset(self) -> None:
        """Restart the unshuffle iteration setup order."""
        self.task_spec_iterator.reset()
        self.cur_unshuffle_runs_count = 0
        self._last_sampled_task = None
        self._last_sampled_walkthrough_task = None

    def set_seed(self, seed: int) -> None:
        self.task_spec_iterator.seed = seed
        self.main_seed = seed

    @property
    def current_task_spec(self) -> RearrangeTaskSpec:
        if self.run_unshuffle_phase:
            return self.unshuffle_env.current_task_spec
        else:
            return self.walkthrough_env.current_task_spec

    def next_task(
        self, forced_task_spec: Optional[RearrangeTaskSpec] = None, **kwargs
    ) -> Optional[UnshuffleTask]:
        """Return a fresh UnshuffleTask setup."""

        walkthrough_finished_and_should_run_unshuffle = (
            forced_task_spec is None
            and self.run_unshuffle_phase
            and self.run_walkthrough_phase
            and (
                self.was_in_exploration_phase
                or self.cur_unshuffle_runs_count < self.unshuffle_runs_per_walkthrough
            )
        )

        if (
            self.last_sampled_task is None
            or not walkthrough_finished_and_should_run_unshuffle
        ):
            self.cur_unshuffle_runs_count = 0

            try:
                if forced_task_spec is None:
                    task_spec: RearrangeTaskSpec = next(self.task_spec_iterator)
                else:
                    task_spec = forced_task_spec
            except StopIteration:
                self._last_sampled_task = None
                return self._last_sampled_task

            runtime_sample = task_spec.runtime_sample

            try:
                if self.run_unshuffle_phase:
                    self.unshuffle_env.reset(
                        task_spec=task_spec,
                        force_axis_aligned_start=self.force_axis_aligned_start,
                    )
                    self.unshuffle_env.shuffle()

                    if runtime_sample:
                        unshuffle_task_spec = self.unshuffle_env.current_task_spec
                        starting_objects = unshuffle_task_spec.runtime_data[
                            "starting_objects"
                        ]
                        openable_data = [
                            {
                                "name": o["name"],
                                "objectName": o["name"],
                                "objectId": o["objectId"],
                                "start_openness": o["openness"],
                                "target_openness": o["openness"],
                            }
                            for o in starting_objects
                            if o["isOpen"] and not o["pickupable"]
                        ]
                        starting_poses = [
                            {
                                "name": o["name"],
                                "objectName": o["name"],
                                "position": o["position"],
                                "rotation": o["rotation"],
                            }
                            for o in starting_objects
                            if o["pickupable"]
                        ]
                        task_spec = RearrangeTaskSpec(
                            scene=unshuffle_task_spec.scene,
                            agent_position=task_spec.agent_position,
                            agent_rotation=task_spec.agent_rotation,
                            openable_data=openable_data,
                            starting_poses=starting_poses,
                            target_poses=starting_poses,
                        )

                self.walkthrough_env.reset(
                    task_spec=task_spec,
                    force_axis_aligned_start=self.force_axis_aligned_start,
                )

                if self.run_walkthrough_phase:
                    self.was_in_exploration_phase = True
                    self._last_sampled_task = WalkthroughTask(
                        sensors=self.sensors,
                        walkthrough_env=self.walkthrough_env,
                        max_steps=self.max_steps["walkthrough"],
                        discrete_actions=self.discrete_actions,
                        disable_metrics=self.run_unshuffle_phase,
                    )
                    self._last_sampled_walkthrough_task = self._last_sampled_task
                else:
                    self.cur_unshuffle_runs_count += 1
                    self._last_sampled_task = UnshuffleTask(
                        sensors=self.sensors,
                        unshuffle_env=self.unshuffle_env,
                        walkthrough_env=self.walkthrough_env,
                        max_steps=self.max_steps["unshuffle"],
                        discrete_actions=self.discrete_actions,
                        require_done_action=self.require_done_action,
                        task_spec_in_metrics=self.task_spec_in_metrics,
                    )
            except Exception as e:
                if runtime_sample:
                    get_logger().error(
                        "Encountered exception while sampling a next task."
                        " As this next task was a 'runtime sample' we are"
                        " simply returning the next task."
                    )
                    get_logger().error(traceback.format_exc())
                    return self.next_task()
                else:
                    raise e
        else:
            self.cur_unshuffle_runs_count += 1
            self.was_in_exploration_phase = False

            walkthrough_task = cast(
                WalkthroughTask, self._last_sampled_walkthrough_task
            )

            if self.cur_unshuffle_runs_count != 1:
                self.unshuffle_env.reset(
                    task_spec=self.unshuffle_env.current_task_spec,
                    force_axis_aligned_start=self.force_axis_aligned_start,
                )
                self.unshuffle_env.shuffle()

            self._last_sampled_task = UnshuffleTask(
                sensors=self.sensors,
                unshuffle_env=self.unshuffle_env,
                walkthrough_env=self.walkthrough_env,
                max_steps=self.max_steps["unshuffle"],
                discrete_actions=self.discrete_actions,
                require_done_action=self.require_done_action,
                locations_visited_in_walkthrough=np.array(
                    tuple(walkthrough_task.visited_positions_xzrsh)
                ),
                object_names_seen_in_walkthrough=copy.copy(
                    walkthrough_task.seen_pickupable_objects
                    | walkthrough_task.seen_openable_objects
                ),
                metrics_from_walkthrough=walkthrough_task.metrics(force_return=True),
                task_spec_in_metrics=self.task_spec_in_metrics,
            )

        return self._last_sampled_task
def generate_rearrangements_for_scenes(
    stage_seed: int,
    stage_scenes: List[str],
    env: RearrangeTHOREnvironment,
    object_types_to_not_move: Set[str],
    max_obj_rearrangements_per_scene: int = 5,
    scene_reuse_count: int = 50,
    obj_name_to_avoid_positions: Optional[Dict[str, np.ndarray]] = None,
    force_visible: bool = True,
    place_stationary: bool = False,
    rotation_increment: int = 30,
) -> dict:
    if 360 % rotation_increment != 0:
        raise ValueError("Rotation increment must be a factor of 360.")

    if obj_name_to_avoid_positions is None:
        obj_name_to_avoid_positions = defaultdict(
            lambda: np.array([[-1000, -1000, -1000]])
        )

    controller = env.controller

    out: dict = dict()
    for scene in stage_scenes:
        print(f"Scene {scene}")

        seed = md5_hash_str_as_int(f"{stage_seed}|{scene}")
        random.seed(seed)

        out[scene] = []

        # Set positions and rotations.
        controller.reset(scene)

        scene_has_openable = 0 != len(
            [
                o
                for o in controller.last_event.metadata["objects"]
                if o["openable"] and not o["pickupable"]
            ]
        )
        if not scene_has_openable:
            warnings.warn(f"SCENE {scene} HAS NO OPENABLE OBJECTS")

        evt = controller.step("GetReachablePositions")
        rps: List[Dict[str, float]] = evt.metadata["actionReturn"]
        rps.sort(key=lambda d: (round(d["x"], 2), round(d["z"], 2)))
        rotations = np.arange(0, 360, rotation_increment)

        for reuse_i in range(scene_reuse_count):
            try_count = 0

            # Evenly distribute the number of object rearrangements.
            num_objs_to_open = scene_has_openable * (reuse_i % 2)
            num_objs_to_move = (1 - num_objs_to_open) + math.floor(
                max_obj_rearrangements_per_scene * (reuse_i / scene_reuse_count)
            )
            position_count_offset = 0

            while True:
                try_count += 1
                if try_count > 100:
                    raise RuntimeError(
                        f"Something is wrong with scene {scene}, please file an issue."
                    )

                episode_seed_string = (
                    f"{scene}|ind_{reuse_i}|tries_{try_count}"
                    f"|counts_{position_count_offset}|seed_{stage_seed}"
                )
                seed = md5_hash_str_as_int(episode_seed_string)
                random.seed(seed)

                # Avoid the agent being unable to teleport to a position
                # because an object has been placed there.
                pos = random.choice(rps)
                rot = {"x": 0, "y": int(random.choice(rotations)), "z": 0}

                # Used to make sure the positions of the objects
                # are not always the same across the same scene.
                start_kwargs = {
                    "randomSeed": random.randint(0, int(1e7) - 1),
                    "forceVisible": force_visible,
                    "placeStationary": place_stationary,
                    "excludedReceptacles": ["ToiletPaperHanger"],
                }
                target_kwargs = {
                    "randomSeed": random.randint(0, int(1e7) - 1),
                    "forceVisible": force_visible,
                    "placeStationary": place_stationary,
                    "excludedReceptacles": ["ToiletPaperHanger"],
                }

                # Sometimes weird bugs arise where the pickupable
                # object count within a scene does not match.
                (
                    opened_data,
                    starting_poses,
                    target_poses,
                ) = generate_one_rearrangement_given_initial_conditions(
                    controller=controller,
                    scene=scene,
                    start_kwargs=start_kwargs,
                    target_kwargs=target_kwargs,
                    num_objs_to_move=num_objs_to_move + position_count_offset,
                    num_objs_to_open=num_objs_to_open,
                    object_types_to_not_move=object_types_to_not_move,
                    agent_pos=pos,
                    agent_rot=rot,
                    allow_putting_objects_away=try_count >= 30,
                )

                if opened_data is None:
                    position_count_offset = max(position_count_offset - 1, 0)
                    print(
                        f"{episode_seed_string}: Failed during generation."
                        # f" {scene}, {pos}, {int(rot['y'])} {start_kwargs}, {target_kwargs}."
                    )
                    continue

                task_spec_dict = {
                    "agent_position": pos,
                    "agent_rotation": int(rot["y"]),
                    "object_rearrangement_count": int(num_objs_to_move)
                    + int(num_objs_to_open),
                    "openable_data": opened_data,
                    "starting_poses": starting_poses,
                    "target_poses": target_poses,
                }

                env.reset(task_spec=RearrangeTaskSpec(scene=scene, **task_spec_dict))
                env.shuffle()

                ips, gps, cps = env.poses

                pose_diffs = cast(
                    List[Dict[str, Any]],
                    env.compare_poses(goal_pose=gps, cur_pose=cps),
                )

                reachable_positions = env.controller.step(
                    "GetReachablePositions"
                ).metadata["actionReturn"]

                failed = False
                for gp, cp, pd in zip(gps, cps, pose_diffs):
                    if pd["iou"] is not None and pd["iou"] < IOU_THRESHOLD:
                        assert gp["type"] not in object_types_to_not_move

                    if gp["broken"] or cp["broken"]:
                        failed = True
                        break

                    pose_diff_energy = env.pose_difference_energy(
                        goal_pose=gp, cur_pose=cp
                    )

                    if pose_diff_energy != 0:
                        obj_name = gp["name"]

                        # Ensure that objects to rearrange are visible from somewhere.
                        interactable_poses = env.controller.step(
                            "GetInteractablePoses",
                            objectId=cp["objectId"],
                            positions=reachable_positions,
                        ).metadata["actionReturn"]
                        if interactable_poses is None or len(interactable_poses) == 0:
                            print(
                                f"{episode_seed_string}: {obj_name} is not visible"
                                f" despite needing to be rearranged."
                            )
                            failed = True
                            break

                        if obj_name in obj_name_to_avoid_positions:
                            if cp["pickupable"]:
                                threshold = 0.15
                                start_position = cp["position"]
                                pos_array = np.array(
                                    [[start_position[k] for k in ["x", "y", "z"]]]
                                )
                            elif cp["openness"] is not None:
                                threshold = 0.05
                                pos_array = np.array([[cp["openness"]]])
                            else:
                                continue

                            dist = np.sqrt(
                                (
                                    (obj_name_to_avoid_positions[obj_name] - pos_array)
                                    ** 2
                                ).sum(-1)
                            ).min()
                            if dist <= threshold:
                                print(
                                    f"{episode_seed_string}: {obj_name} is within"
                                    f" the threshold ({dist} <= {threshold})."
                                )
                                failed = True
                                break

                if failed:
                    continue

                npos_diff = int(
                    sum(
                        pd["iou"] is not None and pd["iou"] < IOU_THRESHOLD
                        for pd in pose_diffs
                    )
                )
                nopen_diff = int(
                    sum(
                        pd["openness_diff"] is not None
                        and pd["openness_diff"] >= OPENNESS_THRESHOLD
                        for pd in pose_diffs
                    )
                )

                if npos_diff != num_objs_to_move:
                    position_count_offset += (npos_diff < num_objs_to_move) - (
                        npos_diff > num_objs_to_move
                    )
                    position_count_offset = max(position_count_offset, 0)
                    print(
                        f"{episode_seed_string}: Incorrect number of objects moved,"
                        f" expected != actual ({num_objs_to_move} != {npos_diff})."
                    )
                    continue

                if nopen_diff != num_objs_to_open:
                    print(
                        f"{episode_seed_string}: Incorrect number of objects opened,"
                        f" expected != actual ({num_objs_to_open} != {nopen_diff})."
                    )
                    continue

                task_spec_dict["position_diff_count"] = npos_diff
                task_spec_dict["open_diff_count"] = nopen_diff
                task_spec_dict["pose_diff_energy"] = float(
                    env.pose_difference_energy(goal_pose=gps, cur_pose=cps).sum()
                )

                if (
                    npos_diff == 0 and nopen_diff == 0
                ) or task_spec_dict["pose_diff_energy"] == 0.0:
                    print(
                        f"Not enough has moved in {scene}, {pos},"
                        f" {int(rot['y'])} {start_kwargs}, {target_kwargs}!"
                    )
                    continue

                if npos_diff > max_obj_rearrangements_per_scene or nopen_diff > 1:
                    print(
                        f"{episode_seed_string}: Final check failed"
                        f" ({npos_diff} [{max_obj_rearrangements_per_scene} max] pos. diffs,"
                        f" {nopen_diff} [1 max] opened)."
                    )
                    continue

                out[scene].append(task_spec_dict)
                print(f"{episode_seed_string} SUCCESS")
                break
    return out
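# Illustrative only: a sketch of how per-scene results (e.g. those gathered from
# `rearrangement_datagen_worker` output queues) might be written to disk in the
# same compressed-pickle layout that `load_rearrange_data_from_path` reads. The
# output directory argument is an assumption for this example.
def example_save_stage_data(
    scene_to_data: Dict[str, List[Dict[str, Any]]],
    stage: str,
    out_dir: str,
) -> str:
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, f"{stage}.pkl.gz")
    # `compress_pickle` picks gzip compression from the ".pkl.gz" extension.
    compress_pickle.dump(scene_to_data, path=out_path)
    return out_path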