Example 1
def generate(simulator, params, robot_start_goal):
    # robot_start_goal is a [start_pos_3, goal_pos_3] pair
    assert len(robot_start_goal) == 2
    rob_start = generate_config_from_pos_3(robot_start_goal[0])
    rob_goal = generate_config_from_pos_3(robot_start_goal[1])
    robot_agent = RobotAgent.generate_robot_with_start_goal(
        rob_start, rob_goal)
    simulator.add_agent(robot_agent)
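Taken together, the call sites in these examples pin down the shape of generate_config_from_pos_3: a 3-element pose (x, y, theta) plus optional dt, v, and w keywords, returning a config object that at least exposes to_3D_numpy() (see Examples 3 and 6). Below is a minimal sketch consistent with those call sites; the Pose3Config container is a hypothetical stand-in for the real return type, which the snippets do not show.

from dataclasses import dataclass

import numpy as np


@dataclass
class Pose3Config:
    # hypothetical stand-in for the config type these examples construct
    x: float
    y: float
    theta: float
    v: float = 0.0   # initial linear speed
    w: float = 0.0   # initial angular speed
    dt: float = 0.1  # timestep the speeds are expressed against

    def to_3D_numpy(self) -> np.ndarray:
        # the only accessor the call sites above actually rely on
        return np.array([self.x, self.y, self.theta], dtype=np.float32)


def generate_config_from_pos_3(pos_3, dt=0.1, v=0.0, w=0.0):
    x, y, theta = pos_3
    return Pose3Config(float(x), float(y), float(theta), v=v, w=w, dt=dt)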
Example 2
    def init_control_pipeline(self):
        # NOTE: this is like an init() run *after* obtaining episode metadata
        # robot start and goal to satisfy the old Agent.planner
        self.start_config = generate_config_from_pos_3(self.get_robot_start())
        self.goal_config = generate_config_from_pos_3(self.get_robot_goal())
        # rest of the 'Agent' params used for the joystick planner
        self.agent_params = create_agent_params(with_obstacle_map=True)
        self.obstacle_map = self.init_obstacle_map()
        self.obj_fn = Agent._init_obj_fn(self, params=self.agent_params)
        #self.obj_fn.add_objective(
        #    Agent._init_psc_objective(params=self.agent_params))

        # Initialize Fast-Marching-Method map for agent's pathfinding
        self.fmm_map = Agent._init_fmm_map(self, params=self.agent_params)
        Agent._update_fmm_map(self)

        # Initialize system dynamics and planner fields
        self.planner = Agent._init_planner(self, params=self.agent_params)
        self.vehicle_data = self.planner.empty_data_dict()
        self.system_dynamics = Agent._init_system_dynamics(
            self, params=self.agent_params)
        # init robot current config from the starting position
        self.robot_current = self.current_ep.get_robot_start().copy()
        # init a list of commands that will be sent to the robot
        self.commands = None
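Note the pattern used throughout this method: the joystick class is not an Agent subclass, so it borrows Agent's initializers by passing itself explicitly (e.g. Agent._init_obj_fn(self, ...)). A minimal, self-contained illustration of why that works in Python (the Agent and Joystick bodies here are illustrative only):

class Agent:
    def _init_obj_fn(self, params=None):
        # in Python 3 this is just a function stored on the class;
        # 'self' is an ordinary first argument, so any object with the
        # right attributes can be passed in its place (duck typing)
        return ('objective for', self.name, params)

class Joystick:          # deliberately NOT an Agent subclass
    name = 'joystick'

j = Joystick()
print(Agent._init_obj_fn(j, params={'w': 1.0}))  # works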
Example 3
    def init_control_pipeline(self):
        self.robot_goal_final = self.get_robot_goal()
        self.goal_config = generate_config_from_pos_3(self.robot_goal_final)
        env = self.current_ep.get_environment()
        self.environment = self.init_obstacle_map(env)
        self.robot = self.get_robot_start()
        self.agents = {}
        agents_info = self.current_ep.get_agents()
        for key, agent in agents_info.items():
            self.agents[key] = agent.get_current_config().to_3D_numpy()

        self.agent_params = create_agent_params(with_obstacle_map=True)
        self.obstacle_map = self.init_obstacle_map_ckpt()
        self.obj_fn = Agent._init_obj_fn(self, params=self.agent_params)
        #self.obj_fn.add_objective(
        #    Agent._init_psc_objective(params=self.agent_params))
        self.fmm_map = Agent._init_fmm_map(self, params=self.agent_params)
        Agent._update_fmm_map(self)
        self.planner = Agent._init_planner(self, params=self.agent_params)
        self.vehicle_data = self.planner.empty_data_dict()
        self.system_dynamics = Agent._init_system_dynamics(
            self, params=self.agent_params)

        self.robot_goal = self.robot
        self.commands = None
Example 4
def from_json(json_str: dict):
    # despite the name, json_str is an already-parsed dict
    name = json_str['name']
    if 'start_config' in json_str:
        start_config = \
            generate_config_from_pos_3(json_str['start_config'])
    else:
        start_config = None
    if 'goal_config' in json_str:
        goal_config = \
            generate_config_from_pos_3(json_str['goal_config'])
    else:
        goal_config = None
    current_config = \
        generate_config_from_pos_3(json_str['current_config'])
    trajectory = None  # unable to recreate the trajectory from JSON
    radius = json_str['radius']
    collision_cooldown = -1
    collided = False
    end_acting = False
    color = None
    return AgentState(None, name, goal_config, start_config,
                      current_config, trajectory, collided, end_acting,
                      collision_cooldown, radius, color)
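Assuming from_json is exposed as a static method on AgentState (as the return type suggests), a round trip might look like the sketch below; the field values are illustrative, not taken from any real episode file.

import json

payload = json.loads("""
{
    "name": "agent_0",
    "start_config": [0.0, 0.0, 0.0],
    "goal_config": [5.0, 3.0, 1.57],
    "current_config": [1.2, 0.4, 0.3],
    "radius": 0.2
}
""")
agent_state = AgentState.from_json(payload)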
Example 5
    def query_next_ckpt(self):
        # linear speed is the norm of (vx, vy); the third component is w
        robot_v_norm = np.sqrt(self.robot_v[0]**2 + self.robot_v[1]**2)
        robot_w = self.robot_v[2]
        robot_config = generate_config_from_pos_3(self.robot,
                                                  dt=self.agent_params.dt,
                                                  v=robot_v_norm,
                                                  w=robot_w)
        planner_data = self.planner.optimize(robot_config,
                                             self.goal_config,
                                             sim_state_hist=self.sim_states)
        tmp_goal = Trajectory.new_traj_clip_along_time_axis(
            planner_data['trajectory'],
            self.agent_params.control_horizon,
            repeat_second_to_last_speed=True)
        # the last waypoint of the clipped segment becomes the next checkpoint
        robot_goal = self.from_conf(tmp_goal, -1)

        return robot_goal
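from_conf is not shown in these snippets; Example 7's commented-out alternative ((x, y, th, v) = self.from_conf(self.commands, -1)) implies it reads the pose and speed at a given index out of a trajectory. A hypothetical reconstruction under that assumption; the accessor names below are guesses, not confirmed API:

def from_conf(self, traj, idx):
    # Hypothetical helper: pull waypoint number idx (here -1, the last
    # one) out of a planned trajectory as an (x, y, theta, v) tuple.
    x, y = traj.position_nk2()[0, idx]
    theta = traj.heading_nk1()[0, idx, 0]
    v = traj.speed_nk1()[0, idx, 0]
    return (float(x), float(y), float(theta), float(v))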
Example 6
    def execute_position_cmds(self):
        for _ in range(self.num_cmds_per_batch):
            if self.get_end_acting():
                break
            joystick_input = self.joystick_inputs[self.num_executed]
            assert len(joystick_input) == 4  # (x, y, theta, velocity)
            new_pos3 = joystick_input[:3]
            new_v = joystick_input[3]
            old_pos3 = self.current_config.to_3D_numpy()
            # ensure the new position is reachable within velocity bounds
            new_pos3 = clip_posn(Agent.sim_dt, old_pos3, new_pos3,
                                 self.v_bounds)
            # move to the new position and update the trajectory
            new_config = generate_config_from_pos_3(new_pos3, v=new_v)
            self.set_current_config(new_config)
            self.trajectory.append_along_time_axis(
                new_config, track_trajectory_acceleration=True)
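clip_posn is likewise not shown; from its call site it appears to bound how far the commanded position may move in one sim_dt step. A plausible sketch, assuming v_bounds = (v_min, v_max) in m/s:

import numpy as np

def clip_posn(sim_dt, old_pos3, new_pos3, v_bounds, eps=1e-6):
    # Hypothetical reconstruction: if the commanded (x, y) displacement
    # implies a speed above v_bounds[1], pull it back onto the circle of
    # reachable positions, keeping the commanded heading.
    old_xy = np.asarray(old_pos3[:2], dtype=np.float64)
    new_xy = np.asarray(new_pos3[:2], dtype=np.float64)
    dist = np.linalg.norm(new_xy - old_xy)
    max_step = v_bounds[1] * sim_dt
    if dist <= max_step + eps:
        return new_pos3
    clipped_xy = old_xy + (max_step / dist) * (new_xy - old_xy)
    return np.array([clipped_xy[0], clipped_xy[1], new_pos3[2]])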
Example 7
    def joystick_plan(self):
        """ Runs the planner for one step from config to generate a
        subtrajectory, the resulting robot config after the robot executes
        the subtrajectory, and relevant planner data
        - Access to sim_states from the self.current_world
        """
        # get information about robot by its "current position" which was updated in sense()
        [x, y, th] = self.robot_current
        v = self.robot_v
        # can also try:
        #     # assumes the robot has executed all the previous commands in self.commands
        #     (x, y, th, v) = self.from_conf(self.commands, -1)
        robot_config = generate_config_from_pos_3(pos_3=(x, y, th), v=v)
        self.planner_data = self.planner.optimize(
            robot_config, self.goal_config, sim_state_hist=self.sim_states)

        # TODO: make sure the planning control horizon is greater than the
        # simulator_joystick_update_ratio else it will not plan far enough

        # LQR feedback control loop
        self.commands = Trajectory.new_traj_clip_along_time_axis(
            self.planner_data['trajectory'],
            self.agent_params.control_horizon,
            repeat_second_to_last_speed=True)
Example 8
    def joystick_plan(self):
        """ Runs the planner for one step from config to generate a
        subtrajectory, the resulting robot config after the robot executes
        the subtrajectory, and relevant planner data
        - Access to sim_states from the self.current_world
        """
        robot_config = generate_config_from_pos_3(self.robot_current,
                                                  dt=self.agent_params.dt,
                                                  v=self.robot_v,
                                                  w=self.robot_w)
        self.planner_data = self.planner.optimize(
            robot_config, self.goal_config, sim_state_hist=self.sim_states)

        # TODO: make sure the planning control horizon is greater than the
        # simulator_joystick_update_ratio else it will not plan far enough

        # LQR feedback control loop
        t_seg = Trajectory.new_traj_clip_along_time_axis(
            self.planner_data['trajectory'],
            self.agent_params.control_horizon,
            repeat_second_to_last_speed=True)
        # parse the planned subtrajectory into the requisite (v, w) commands
        _, cmd_actions_nkf = self.system_dynamics.parse_trajectory(t_seg)
        self.commands = cmd_actions_nkf[0]
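Examples 5, 7, and 8 all repeat one pipeline: build a start config from the sensed pose, optimize toward the goal, clip the planned trajectory to the control horizon, then (in Example 8) convert it to velocity commands. A condensed sketch of that pipeline; as at the call sites above, it assumes planner.optimize returns a dict with a 'trajectory' entry and parse_trajectory returns (states, actions):

def plan_one_step(self):
    # 1) start config from the robot's sensed pose and speeds
    robot_config = generate_config_from_pos_3(self.robot_current,
                                              dt=self.agent_params.dt,
                                              v=self.robot_v,
                                              w=self.robot_w)
    # 2) optimize toward the goal, conditioned on the world history
    planner_data = self.planner.optimize(robot_config, self.goal_config,
                                         sim_state_hist=self.sim_states)
    # 3) keep only the first control_horizon steps of the plan
    t_seg = Trajectory.new_traj_clip_along_time_axis(
        planner_data['trajectory'], self.agent_params.control_horizon,
        repeat_second_to_last_speed=True)
    # 4) turn the clipped segment into per-step (v, w) commands
    _, cmd_actions_nkf = self.system_dynamics.parse_trajectory(t_seg)
    return cmd_actions_nkf[0]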