Example #1
    def plan(self):
        """
        Runs the planner for one step from config to generate a
        subtrajectory, the resulting robot config after the robot executes
        the subtrajectory, and generates relevant planner data
        NOTE: this planner only considers obstacles in the environment, not
        collisions with other agents/personal space
        """
        assert hasattr(self, 'planner')
        assert self.sim_dt is not None
        # Generate the next trajectory segment, update next config, update actions/data
        if self.end_acting:
            return

        self.planner_data = self.planner.optimize(self.planned_next_config,
                                                  self.goal_config)
        traj_segment = \
            Trajectory.new_traj_clip_along_time_axis(self.planner_data['trajectory'],
                                                     self.params.control_horizon,
                                                     repeat_second_to_last_speed=True)
        self.planned_next_config = \
            SystemConfig.init_config_from_trajectory_time_index(
                traj_segment, t=-1)

        tr_acc = self.params.planner_params.track_accel
        self.trajectory.append_along_time_axis(traj_segment,
                                               track_trajectory_acceleration=tr_acc)
        self.enforce_termination_conditions()
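
A minimal usage sketch (not from the source) of driving plan() from an outer loop until the agent reports it is done; the drive_planner name and step budget are hypothetical.

def drive_planner(agent, max_steps=1000):
    # Hypothetical driver (illustrative only): replan one segment per
    # iteration until end_acting is set or the step budget runs out.
    for _ in range(max_steps):
        if agent.end_acting:
            break
        agent.plan()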
Example #2
    def act(self):
        """ A utility method to initialize a config object
        from a particular timestep of a given trajectory object"""
        if self.end_acting:
            # exit if there is no more acting to do
            return
        assert self.sim_dt is not None
        step = int(np.floor(self.sim_dt / self.params.dt))

        # first check for collisions with any other gen_agents
        self.check_collisions(self.world_state)

        # then update the current config incrementally (can teleport to end if t=-1)
        new_config = SystemConfig.init_config_from_trajectory_time_index(self.trajectory,
                                                                         t=self.path_step)
        self.set_current_config(new_config)

        # updating "next step" for agent path after traversing it
        self.path_step += step

        # consider it a full-on collision once the agent has passed its "collision point"
        if self.path_step >= self.trajectory.k or self.path_step >= self.collision_point_k:
            self.end_acting = True

        if self.end_acting:
            if self.params.verbose:
                print("terminated act for agent", self.get_name())
            # save memory by deleting control pipeline (very memory intensive)
            del self.planner

        # decrement the collision cooldown; the agent counts as "uncollided"
        # again once it reaches 0
        if self.collision_cooldown > 0:
            self.collision_cooldown -= 1
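
One plausible per-tick interleaving of plan() and act(); step_agent is a hypothetical helper for illustration, not part of the source API.

def step_agent(agent):
    # Hypothetical per-tick driver (illustrative): plan the next segment,
    # then advance the agent along its trajectory for this simulator step.
    if not agent.end_acting:
        agent.plan()
        agent.act()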
Example #3
    def _iterate(self, config):
        """ Runs the planner for one step from config to generate a
        subtrajectory, the resulting robot config after the robot executes
        the subtrajectory, and relevant planner data"""

        planner_data = self.planner.optimize(config)
        trajectory_segment, trajectory_data, commanded_actions_nkf = \
            self._process_planner_data(config, planner_data)
        next_config = SystemConfig.init_config_from_trajectory_time_index(
            trajectory_segment, t=-1)
        return trajectory_segment, next_config, trajectory_data, commanded_actions_nkf
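
A hedged sketch of chaining _iterate calls into a rollout; the rollout name and num_segments bound are assumptions for illustration.

def rollout(planner_agent, start_config, num_segments=10):
    # Hypothetical rollout (illustrative): feed each returned next_config
    # back into _iterate and collect the trajectory segments.
    config = start_config
    segments = []
    for _ in range(num_segments):
        segment, config, _, _ = planner_agent._iterate(config)
        segments.append(segment)
    return segments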
Example #4
    def execute_velocity_cmds(self):
        """Executes one batch of joystick velocity commands in open loop,
        appending each resulting trajectory segment and updating the
        current config"""
        for _ in range(self.num_cmds_per_batch):
            if self.get_end_acting():
                break
            current_config = self.get_current_config()
            # the command is indexed by self.num_executed; indexing is safe
            # due to the size checks in update()
            vel_cmd = self.joystick_inputs[self.num_executed]
            assert len(vel_cmd) == 2  # always a 2-tuple of (v, w)
            v = clip_vel(vel_cmd[0], self.v_bounds)
            w = clip_vel(vel_cmd[1], self.w_bounds)
            # NOTE: the open-loop controller expects velocity commands in the format
            # np.array([[[L, A]]], dtype=np.float32) where L is linear and A is angular
            command = np.array([[[v, w]]], dtype=np.float32)
            t_seg, _ = Agent.apply_control_open_loop(self,
                                                     current_config,
                                                     command,
                                                     1,
                                                     sim_mode='ideal')
            self.trajectory.append_along_time_axis(
                t_seg, track_trajectory_acceleration=True)
            # update the current config to the end of the executed trajectory segment
            self.current_config = \
                SystemConfig.init_config_from_trajectory_time_index(
                    t_seg, t=-1)
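
A sketch of queueing a batch of (v, w) joystick commands before execute_velocity_cmds consumes them; queue_joystick_batch and the list-like joystick_inputs buffer are assumptions, not confirmed source API.

def queue_joystick_batch(robot, commands):
    # Hypothetical helper (illustrative): append (v, w) velocity pairs to the
    # robot's joystick input buffer; execute_velocity_cmds reads them by index.
    for v, w in commands:
        robot.joystick_inputs.append((float(v), float(w)))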