def _set_action(self, action):
    """Applies the given action to the simulation."""
    u = self.get_state()[3:5]
    u[0] = self.max_v
    # Map action[0] from the normalized range [-1, 1] to [-max_w, max_w].
    u[1] = (action[0] + 1.0) / 2.0 * (2 * self.max_w) - self.max_w

    # Used by the differential guidance law.
    self.last_pur_state = self.get_state()
    self.last_tar_state = self.get_goal()

    self.pursuitor_model.motion(u, self.dt)

    if self.evasion_mode:
        # Advance to the next waypoint once the evader reaches this one.
        if np.linalg.norm(
                self.evasion_route[self.evasion_route_index, :2]
                - self.get_goal()[:2]) <= self.robot_radius * 2:
            self.evasion_route_index = (self.evasion_route_index + 1) % len(self.evasion_route)
        target_u, _ = dwa_control(
            self.get_goal(), self.get_goal()[3:5], self.config_evasion,
            self.evasion_route[self.evasion_route_index, :2], self.ob_list)
        self.evader_model.motion(target_u, self.dt)
    else:
        self.evader_model.motion(self.target_u, self.dt)

    assert not self.robot_collision_with_obstacle(
        self.evader_model.state[:2], self.target_radius,
        self.ob_list, self.ob_radius)

    x = self.get_state()
    self.traj = np.vstack((self.traj, x))  # store state history
    self.traj_pursuitor.append(self.get_state())
    self.pursuit_strategy.append(action)
    self.traj_evader.append(self.get_goal())
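# --- Illustrative sketch (not part of the environment) ---
# The scaling above is an affine map from the policy's normalized action
# range [-1, 1] onto [-max_w, max_w]. A standalone version, with a
# hypothetical helper name `scale_action`, makes the mapping explicit:

def scale_action(a, low, high):
    """Affinely map a from [-1, 1] to [low, high]."""
    return (a + 1.0) / 2.0 * (high - low) + low

# Endpoints and midpoint land where expected, e.g. for max_w = 1.2 rad/s:
assert scale_action(-1.0, -1.2, 1.2) == -1.2
assert scale_action(0.0, -1.2, 1.2) == 0.0
assert scale_action(1.0, -1.2, 1.2) == 1.2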
def _set_action(self, action):
    """Applies the given action to the simulation."""
    u = [0, 0]
    if action == 0:
        u, ltraj = dwa_control(self.get_state(),
                               np.array(self.pursuitor_model.state[3:5]),
                               self.config_dwa, self.get_goal()[:2], self.ob_list)
    elif action == 1:
        # Alternative (disabled): artificial potential field control.
        # u = apf_control(self.confg_apf, self.pursuitor_model.state,
        #                 self.evader_model.state, self.ob_list)
        # # get angular velocity from the commanded heading
        # u[1] = self.pursuitor_model.rot_to_angle(u[1])
        w = pn_control(self.confg_pn, self.get_state(), self.get_goal(),
                       self.last_pur_state, self.last_tar_state)
        u = [self.max_v, w]

    self.last_pur_state = self.get_state()
    self.last_tar_state = self.get_goal()

    self.pursuitor_model.motion(u, self.dt)
    self.evader_model.motion(self.target_u, self.dt)

    assert not self.robot_collision_with_obstacle(
        self.evader_model.state[:2], self.target_radius,
        self.ob_list, self.ob_radius)

    x = self.get_state()
    self.traj = np.vstack((self.traj, x))  # store state history
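# --- Illustrative sketch (not the repo's pn_control) ---
# Proportional navigation commands a turn rate proportional to the rotation
# rate of the line of sight (LOS) from pursuer to target, which is why the
# branch above needs the previous pursuer/target states. The function below
# is our own minimal version; the name, signature, and gain N are assumptions.
import math

def pn_turn_rate(pur_xy, tar_xy, last_pur_xy, last_tar_xy, dt, N=3.0):
    """Return w = N * d(lambda)/dt, where lambda is the LOS angle."""
    lam = math.atan2(tar_xy[1] - pur_xy[1], tar_xy[0] - pur_xy[0])
    lam_prev = math.atan2(last_tar_xy[1] - last_pur_xy[1],
                          last_tar_xy[0] - last_pur_xy[0])
    # Wrap the angle difference into (-pi, pi] before differentiating.
    d_lam = math.atan2(math.sin(lam - lam_prev), math.cos(lam - lam_prev))
    return N * d_lam / dt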
def _set_action(self, action):
    """Applies the given action to the simulation."""
    u = [0, 0]
    if action == 0:
        u, ltraj = dwa_control(self.get_state(),
                               np.array(self.pursuitor_model.state[3:5]),
                               self.config_dwa, self.get_goal()[:2], self.ob_list)
    elif action == 1:
        w = pn_control(self.confg_pn, self.get_state(), self.get_goal(),
                       self.last_pur_state, self.last_tar_state)
        u = [self.max_v, w]
    elif action == 2:
        u = apf_control(self.confg_apf, self.get_state(), self.get_goal(), self.ob_list)
        # Convert the commanded heading into an angular velocity.
        u[1] = self.pursuitor_model.rot_to_angle(u[1])

    # Used by the differential guidance law.
    self.last_pur_state = self.get_state()
    self.last_tar_state = self.get_goal()

    self.pursuitor_model.motion(u, self.dt)

    if self.evasion_mode:
        # Advance to the next waypoint once the evader reaches this one.
        if np.linalg.norm(self.evasion_route[self.evasion_route_index, :2]
                          - self.get_goal()[:2]) <= self.robot_radius * 2:
            self.evasion_route_index = (self.evasion_route_index + 1) % len(self.evasion_route)
        target_u, _ = dwa_control(self.get_goal(), self.get_goal()[3:5],
                                  self.config_evasion,
                                  self.evasion_route[self.evasion_route_index, :2],
                                  self.ob_list)
        self.evader_model.motion(target_u, self.dt)
    else:
        self.evader_model.motion(self.target_u, self.dt)

    assert not self.robot_collision_with_obstacle(
        self.evader_model.state[:2], self.target_radius,
        self.ob_list, self.ob_radius)

    x = self.get_state()
    self.traj = np.vstack((self.traj, x))  # store state history
    self.traj_pursuitor.append(self.get_state())
    self.pursuit_strategy.append(action)
    self.traj_evader.append(self.get_goal())
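# --- Illustrative sketch (not the repo's apf_control) ---
# An artificial potential field sums an attractive pull toward the goal with
# repulsive pushes from obstacles inside an influence radius d0, then steers
# along the resulting force. Gains k_att/k_rep and d0 below are placeholder
# values, and the helper name is ours.
import numpy as np

def apf_heading(pos, goal, obstacles, k_att=1.0, k_rep=0.5, d0=2.0):
    """Return the heading (rad) of the combined APF force at pos."""
    force = k_att * (np.asarray(goal, dtype=float) - np.asarray(pos, dtype=float))
    for ob in obstacles:
        diff = np.asarray(pos, dtype=float) - np.asarray(ob, dtype=float)
        d = np.linalg.norm(diff)
        if 1e-6 < d < d0:
            # Repulsion grows sharply as the robot nears the obstacle.
            force += k_rep * (1.0 / d - 1.0 / d0) / d ** 2 * (diff / d)
    return float(np.arctan2(force[1], force[0]))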
# goal position [x(m), y(m)]
target_model = RobotModel(2, 0, math.pi / 3 * 2, 0.7, math.pi / 3 * 5,
                          gx, gy, math.pi / 2.0, 0.0, 0.0)
goal = target_model.state[:2]
# obstacles [[x(m), y(m)], ...]
ob = np.array([[-1, -1],
               [0, 2],
               [4.0, 2.0],
               [5.0, 4.0],
               [5.0, 5.0],
               [5.0, 6.0],
               [5.0, 9.0],
               [8.0, 9.0],
               [7.0, 9.0],
               [12.0, 12.0]])

u = np.array([0.0, 0.0])
config = ConfigDWA(2, 0, math.pi / 3 * 2, 0.7, math.pi / 3 * 5)
traj = np.array(x)

for i in range(1000):
    u, ltraj = dwa_control(x, u, config, goal, ob)
    x = motionModel.motion(u, config.dt)
    target_model.motion([1.0, 0], config.dt)  # the target keeps moving
    goal = target_model.state[:2]
    traj = np.vstack((traj, x))  # store state history

    if show_animation:
        plt.cla()
        plt.plot(ltraj[:, 0], ltraj[:, 1], "-g")
        plt.plot(x[0], x[1], "xr")
        plt.plot(goal[0], goal[1], "xb")
        plt.plot(ob[:, 0], ob[:, 1], "ok")
        plot_arrow(x[0], x[1], x[2])
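# --- Illustrative sketch ---
# `plot_arrow` is not defined in this snippet; a plausible implementation,
# following the common matplotlib pattern for drawing a robot pose, is:
import math
import matplotlib.pyplot as plt

def plot_arrow(x, y, yaw, length=0.5, width=0.1):
    """Draw the robot pose as an arrow at (x, y) pointing along yaw (rad)."""
    plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
              head_length=1.5 * width, head_width=width)
    plt.plot(x, y)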
min_u = np.zeros(3)
config = Config()
traj = np.array(pos)
path = None  # for now, there is no global path
count = 0    # round counter used in the running loop

# Initialize the debug module
debug.dwa_msg_init()

# Start the vision thread
my_thread = MyThread(1, "get_info", visionmodule=vision)
my_thread.start()

# Enter the running loop
while True:
    u, ltraj, cost = dwa.dwa_control(pos, min_u, config, goal, path, ob)
    sender.send_action(0, u[0], u[1], u[2])
    traj = np.vstack((traj, pos))  # store state history

    # Display debug messages
    debug.dwa_msgs[0].text.text = "POS: " + str(pos)
    debug.dwa_msgs[1].text.text = "u: " + str(u)
    debug.dwa_msgs[2].text.text = "Final Cost: " + str(cost)
    debug.dwa_msgs[3].text.text = "Round " + str(count + 1)
    debug.dwa_msgs[4].line.start.x = int(100 * pos[0])
    debug.dwa_msgs[4].line.start.y = int(-100 * pos[1])
    debug.dwa_msgs[4].line.end.x = int(100 * ltraj[-1, 0])
    debug.dwa_msgs[4].line.end.y = int(-100 * ltraj[-1, 1])
    debug.send_msg(debug.messages)

    # check goal
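# --- Illustrative sketch ---
# The loop is truncated at "# check goal". A typical termination test, written
# here as a standalone helper (our name; we assume `config.robot_radius`
# exists), declares success once the robot center is within one radius of the
# goal:
import math

def reached_goal(pos, goal, radius):
    """True once the robot is within `radius` meters of the goal."""
    return math.hypot(pos[0] - goal[0], pos[1] - goal[1]) <= radius

# Inside the while-loop this would end the run:
#     if reached_goal(pos, goal, config.robot_radius):
#         break
#     count += 1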