def _get_h12(self, inputs):
    """
    Compute a two-scale map encoding: a fine-grained crop around the agent
    and a coarse max-pooled summary of the full map, concatenated channel-wise.
    """
    x = inputs["pose_in_map_at_t"]
    h = inputs["map_at_t"]
    # Fine scale: a (G, G) crop centered at the agent's (x, y) map position
    h_1 = crop_map(h, x[:, :2], self.G)
    # Coarse scale: the full map max-pooled down to (G, G)
    h_2 = F.adaptive_max_pool2d(h, (self.G, self.G))
    # Concatenate along the channel dimension
    h_12 = torch.cat([h_1, h_2], dim=1)
    return h_12
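# A minimal sketch of the two-scale encoding above. `crop_map` is defined
# elsewhere in this repo; `_simple_crop` below is a simplified integer-indexing
# stand-in for it (the real crop_map may interpolate), used only to illustrate
# the shapes flowing through _get_h12. The demo name and sizes are hypothetical.
def _demo_two_scale_encoding():
    import torch
    import torch.nn.functional as F

    def _simple_crop(h, xy, size):
        # Zero-pad so crops near the border stay in bounds, then slice a
        # (size, size) window centered at each (x, y) position.
        half = size // 2
        hp = F.pad(h, (half, half, half, half))
        crops = []
        for b in range(h.shape[0]):
            cx = int(xy[b, 0].item()) + half
            cy = int(xy[b, 1].item()) + half
            crops.append(hp[b : b + 1, :, cy - half : cy + half, cx - half : cx + half])
        return torch.cat(crops, dim=0)

    bs, C, V, G = 2, 2, 961, 240
    h = torch.rand(bs, C, V, V)               # (bs, C, V, V) global map
    x = torch.rand(bs, 2) * V                 # (bs, 2) agent (x, y) positions
    h_1 = _simple_crop(h, x, G)               # fine egocentric crop
    h_2 = F.adaptive_max_pool2d(h, (G, G))    # coarse full-map summary
    h_12 = torch.cat([h_1, h_2], dim=1)       # (bs, 2 * C, G, G)
    assert h_12.shape == (bs, 2 * C, G, G)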
def _compute_local_map_crop(self, global_map, global_pose):
    """
    Crop an egocentric local map around the agent, rotated to match the
    agent's heading.
    """
    local_crop_size = self.config.LOCAL_POLICY.embed_map_size
    # Crop a larger region first so that the rotation below does not pull
    # out-of-bounds content into the final crop.
    exp_crop_size = int(1.5 * local_crop_size)
    cropped_map = crop_map(
        global_map, self.states["curr_map_position"], exp_crop_size
    )
    global_heading = global_pose[:, 2]  # (bs, ) heading in radians
    # Rotation-only transform: zero translation, rotate by the agent heading
    rotation_params = torch.stack(
        [
            torch.zeros_like(global_heading),
            torch.zeros_like(global_heading),
            global_heading,
        ],
        dim=1,
    )
    rotated_map = spatial_transform_map(cropped_map, rotation_params)
    # Crop the final (local_crop_size, local_crop_size) region about the center
    center_locs = torch.zeros_like(self.states["curr_map_position"])
    center_locs[:, 0] = rotated_map.shape[3] // 2
    center_locs[:, 1] = rotated_map.shape[2] // 2
    rotated_map = crop_map(rotated_map, center_locs, local_crop_size)
    return rotated_map
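# Why the 1.5x expanded crop: rotating an axis-aligned square of side L can
# sample content from a region of diameter L * sqrt(2) ~= 1.414 * L around the
# center, so cropping 1.5 * L first, rotating, and then center-cropping back to
# L guarantees the final local map contains no out-of-bounds corners. A quick
# sanity check of that margin (the demo name and size are illustrative only):
def _demo_expanded_crop_margin():
    import math

    L = 240                                # hypothetical local_crop_size
    exp = int(1.5 * L)                     # expanded crop, as in the method above
    needed = math.ceil(L * math.sqrt(2))   # diagonal of the L x L final crop
    assert exp >= needed, (exp, needed)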
def _compute_gt_local_action(
    self, global_map, agent_world_xyt, goal_world_xyt, M, s
):
    """
    Estimate the shortest-path action from the agent position to the goal
    position.
    """
    agent_map_xy = convert_world2map(agent_world_xyt, (M, M), s)
    goal_map_xy = convert_world2map(goal_world_xyt, (M, M), s)
    # ============ Crop a region covering agent_map_xy and goal_map_xy ============
    abs_delta_xy = torch.abs(agent_map_xy - goal_map_xy)
    S = max(int(torch.max(abs_delta_xy).item()), 80)
    old_center_xy = (agent_map_xy + goal_map_xy) // 2  # (bs, 2)
    cropped_global_map = crop_map(global_map, old_center_xy, int(S))
    global_map_np = self._process_maps(cropped_global_map)  # (bs, S, S)
    new_center_xy = torch.zeros_like(old_center_xy)
    new_center_xy[:, 0] = global_map_np.shape[2] // 2
    new_center_xy[:, 1] = global_map_np.shape[1] // 2
    # ================ Transform points to the new coordinate system ==============
    new_agent_map_xy = agent_map_xy + new_center_xy - old_center_xy
    new_goal_map_xy = goal_map_xy + new_center_xy - old_center_xy
    map_xy_np = asnumpy(new_agent_map_xy).astype(np.int32)  # (bs, 2)
    goal_xy_np = asnumpy(new_goal_map_xy).astype(np.int32)
    # Ensure that points do not go outside the map limits
    map_W = global_map_np.shape[2]
    map_H = global_map_np.shape[1]
    map_xy_np[:, 0] = np.clip(map_xy_np[:, 0], 0, map_W - 1)
    map_xy_np[:, 1] = np.clip(map_xy_np[:, 1], 0, map_H - 1)
    goal_xy_np[:, 0] = np.clip(goal_xy_np[:, 0], 0, map_W - 1)
    goal_xy_np[:, 1] = np.clip(goal_xy_np[:, 1], 0, map_H - 1)
    sample_flag = [1.0 for _ in range(self.nplanners)]
    # Compute the plan
    plans_xy = self.planner.plan(global_map_np, map_xy_np, goal_xy_np, sample_flag)
    # ===================== Sample an action to a nearby point ====================
    # 0 is forward, 1 is left, 2 is right
    gt_actions = []
    forward_step = self.config.LOCAL_POLICY.AGENT_DYNAMICS.forward_step
    turn_angle = math.radians(self.config.LOCAL_POLICY.AGENT_DYNAMICS.turn_angle)
    for i in range(self.nplanners):
        path_x, path_y = plans_xy[i]
        # If planning failed, sample a random action
        if path_x is None:
            gt_actions.append(random.randint(0, 2))
            continue
        # Navigate toward a point roughly 0.5 meters away along the path
        dl = min(int(0.5 / s), len(path_x) - 1)
        # The path is in reverse order
        goal_x, goal_y = path_x[-dl], path_y[-dl]
        agent_x, agent_y = map_xy_np[i].tolist()
        # Decide the action
        # Heading in the map convention (offset by pi / 2 from the world frame)
        agent_heading = agent_world_xyt[i, 2].item() - math.pi / 2
        reqd_heading = math.atan2(goal_y - agent_y, goal_x - agent_x)
        diff_angle = reqd_heading - agent_heading
        # Wrap the heading difference to (-pi, pi]
        diff_angle = math.atan2(math.sin(diff_angle), math.cos(diff_angle))
        if abs(diff_angle) < 1.5 * turn_angle:
            # Move forward if roughly facing the correct direction
            gt_actions.append(0)
        elif diff_angle < 0:
            # Turn left if the goal is to the left
            gt_actions.append(1)
        else:
            # Turn right otherwise
            gt_actions.append(2)
    return torch.Tensor(gt_actions).unsqueeze(1).long()
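# The atan2(sin, cos) step above wraps an arbitrary angle difference into
# (-pi, pi], which is what keeps the left/right decision correct even when the
# two headings straddle the +/- pi boundary. A hedged sketch of the same
# decision rule, factored out for clarity (the function name and the sample
# values below are illustrative only):
def _demo_heading_to_action(agent_heading, reqd_heading, turn_angle):
    import math

    diff_angle = reqd_heading - agent_heading
    # Wrap to (-pi, pi] so the sign reflects the shorter turning direction
    diff_angle = math.atan2(math.sin(diff_angle), math.cos(diff_angle))
    if abs(diff_angle) < 1.5 * turn_angle:
        return 0  # move forward
    elif diff_angle < 0:
        return 1  # turn left
    else:
        return 2  # turn right

# E.g. an agent facing ~2.8 rad that must face ~-2.8 rad should turn right,
# crossing the +/- pi boundary the short way, not turn left the long way:
#   _demo_heading_to_action(2.8, -2.8, math.radians(10)) -> 2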
def _compute_plans(
    self,
    global_map,
    agent_map_xy,
    goal_map_xy,
    sample_goal_flags,
    cache_map=False,
    crop_map_flag=True,
):
    """
    global_map - (bs, 2, V, V) tensor
    agent_map_xy - (bs, 2) agent's current position on the map
    goal_map_xy - (bs, 2) goal's current position on the map
    sample_goal_flags - list of zeros and ones: should a new goal be sampled?
    """
    # ==================== Process the map to get the planner map =================
    s = self.map_scale
    # The processed map has zeros for free space and ones for obstacles
    global_map_proc = self._process_maps(
        global_map, goal_map_xy
    )  # (bs, M, M) tensor
    # =================== Crop a local region around agent, goal ==================
    if crop_map_flag:
        # Determine the crop size
        abs_diff_xy = torch.abs(agent_map_xy - goal_map_xy)
        S = int(torch.max(abs_diff_xy).item())
        # Add a small buffer around the agent location
        buffer_size = int(3.0 / s)  # 3 meters of buffer space in total
        S += buffer_size
        old_center_xy = (agent_map_xy + goal_map_xy) / 2
        # Crop an SxS region centered around old_center_xy.
        # Note: out-of-bound regions are zero-padded by default. Since zeros
        # correspond to free space here, that is not a problem.
        cropped_global_map = crop_map(
            global_map_proc.unsqueeze(1), old_center_xy, S
        ).squeeze(1)  # (bs, S, S)
        # Add zero padding to ensure the plans don't fail due to cropping
        pad_size = 5
        cropped_global_map = F.pad(
            cropped_global_map, (pad_size, pad_size, pad_size, pad_size)
        )
        S += pad_size * 2
        # Transform to the new coordinates
        new_center_xy = torch.ones_like(old_center_xy) * (S / 2)
        new_agent_map_xy = agent_map_xy + (new_center_xy - old_center_xy)
        new_goal_map_xy = goal_map_xy + (new_center_xy - old_center_xy)
    else:
        cropped_global_map = global_map_proc  # (bs, M, M)
        old_center_xy = (agent_map_xy + goal_map_xy) / 2
        new_center_xy = old_center_xy
        new_agent_map_xy = agent_map_xy
        new_goal_map_xy = goal_map_xy
        S = cropped_global_map.shape[1]
    if cache_map:
        self._cropped_global_map = cropped_global_map
    # Clip points to ensure they are within the map limits
    new_agent_map_xy = torch.clamp(new_agent_map_xy, 0, S - 1)
    new_goal_map_xy = torch.clamp(new_goal_map_xy, 0, S - 1)
    # Convert to numpy
    agent_map_xy_np = asnumpy(new_agent_map_xy).astype(np.int32)
    goal_map_xy_np = asnumpy(new_goal_map_xy).astype(np.int32)
    global_map_np = asnumpy(cropped_global_map)
    # =================== Plan paths from agent to goal positions =================
    plans = self.planner.plan(
        global_map_np, agent_map_xy_np, goal_map_xy_np, sample_goal_flags
    )  # list of (plan_x, plan_y) tuples of lists
    # Convert plans back to the original coordinates
    final_plans = []
    for i in range(len(plans)):
        plan_x, plan_y = plans[i]
        # Planning failure
        if plan_x is None:
            final_plans.append((plan_x, plan_y))
            continue
        # Undo the crop-frame translation applied before planning
        offset_x = int((old_center_xy[i, 0] - new_center_xy[i, 0]).item())
        offset_y = int((old_center_xy[i, 1] - new_center_xy[i, 1]).item())
        final_plan_x, final_plan_y = [], []
        for px, py in zip(plan_x, plan_y):
            final_plan_x.append(px + offset_x)
            final_plan_y.append(py + offset_y)
        final_plans.append((final_plan_x, final_plan_y))
    return final_plans
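# The crop/uncrop bookkeeping above is a translation by
# (new_center - old_center) into the crop frame before planning, and the
# inverse translation back out afterwards. A hedged sketch with scalar
# coordinates (all names and values below are hypothetical):
def _demo_plan_coordinate_roundtrip():
    old_center = (500, 420)          # crop center in the global map frame
    S = 150                          # padded crop size
    new_center = (S // 2, S // 2)    # crop center in the crop frame
    # A planned waypoint expressed in crop-frame coordinates
    crop_pt = (80, 60)
    # Map it back to global coordinates, exactly as in the final loop above
    offset = (old_center[0] - new_center[0], old_center[1] - new_center[1])
    global_pt = (crop_pt[0] + offset[0], crop_pt[1] + offset[1])
    assert global_pt == (80 + 500 - 75, 60 + 420 - 75)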