def _convert_obs(self):
    robot = self.soadrl_sim.robot
    # lidar obs
    lidar_pos = np.array([robot.px, robot.py, robot.theta], dtype=np.float32)
    ranges = np.ones((self.n_angles,), dtype=np.float32) * 25.
    angles = np.linspace(self.kLidarMergedMinAngle,
                         self.kLidarMergedMaxAngle - self.kLidarAngleIncrement,
                         self.n_angles) + lidar_pos[2]
    render_contours_in_lidar(ranges, angles, self.flat_contours, lidar_pos[:2])
    # agents
    other_agents = []
    for i, human in enumerate(self.soadrl_sim.humans):
        pos = np.array([human.px, human.py, human.theta], dtype=np.float32)
        dist = self.distances_travelled_in_base_frame[i].astype(np.float32)
        vel = np.array([human.vx, human.vy], dtype=np.float32)
        if self.lidar_legs:
            agent = CSimAgent(pos, dist, vel)
        else:
            agent = CSimAgent(pos, dist, vel, type_="trunk", radius=human.radius)
        other_agents.append(agent)
    # apply through converter map (res 1., origin 0,0 -> i,j == x,y)
    self.converter_cmap2d.render_agents_in_lidar(ranges, angles, other_agents, lidar_pos[:2])
    self.lidar_scan = ranges
    self.lidar_angles = angles
    # robotstate obs
    # shape (n_agents, 5 [grx, gry, vx, vy, vtheta]) - all in base frame
    baselink_in_world = np.array([robot.px, robot.py, robot.theta])
    world_in_baselink = inverse_pose2d(baselink_in_world)
    robotvel_in_world = np.array([robot.vx, robot.vy, 0])  # TODO: actual robot rot vel?
    robotvel_in_baselink = apply_tf_to_vel(robotvel_in_world, world_in_baselink)
    goal_in_world = np.array([robot.gx, robot.gy, 0])
    goal_in_baselink = apply_tf_to_pose(goal_in_world, world_in_baselink)
    robotstate_obs = np.hstack([goal_in_baselink[:2], robotvel_in_baselink])
    obs = (self.lidar_scan, robotstate_obs)
    return obs
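# NOTE (illustrative sketch, not part of the original file): the frame conversions
# above and below rely on the pose2d helpers (inverse_pose2d, apply_tf,
# apply_tf_to_pose, apply_tf_to_vel). A minimal self-contained version of the
# assumed SE(2) convention is given here for reference; the actual pose2d
# package may differ in details.
import numpy as np

def inverse_pose2d(pose):
    """Invert a 2D pose [x, y, theta] seen as a rigid transform."""
    x, y, th = pose
    c, s = np.cos(th), np.sin(th)
    return np.array([-c * x - s * y, s * x - c * y, -th])

def apply_tf(points, pose):
    """Transform 2D point(s) of shape (2,) or (N, 2) by pose [x, y, theta]."""
    x, y, th = pose
    c, s = np.cos(th), np.sin(th)
    R = np.array([[c, -s], [s, c]])
    return np.asarray(points) @ R.T + np.array([x, y])

def apply_tf_to_pose(p, pose):
    """Transform a pose [x, y, theta] into the frame described by pose."""
    px, py = apply_tf(np.asarray(p[:2]), pose)
    return np.array([px, py, p[2] + pose[2]])

def apply_tf_to_vel(vel, pose):
    """Rotate a velocity [vx, vy, vtheta] into the target frame (no translation)."""
    _, _, th = pose
    c, s = np.cos(th), np.sin(th)
    vx, vy, vth = vel
    return np.array([c * vx - s * vy, s * vx + c * vy, vth])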
def visuals_loop(self):
    while True:
        xx, yy, clusters, is_legs, cogs, radii = self.clusters_data
        # past positions
        tf_past_in_rob = []
        if self.tf_pastrobs_in_fix:
            tf_rob_in_fix = self.tf_pastrobs_in_fix[-1]
            for i, tf_a in enumerate(self.tf_pastrobs_in_fix[::-1]):
                tf_b = apply_tf_to_pose(Pose2D(tf_a),
                                        inverse_pose2d(Pose2D(tf_rob_in_fix)))
                tf_past_in_rob.append(tf_b)
        # Plotting
        plt.figure("clusters")
        plt.cla()
        plt.scatter(xx, yy, zorder=1, facecolor=(1, 1, 1, 1), edgecolor=(0, 0, 0, 1),
                    color='k', marker='.')
        # for x, y in zip(xx, yy):
        #     plt.plot([0, x], [0, y], linewidth=0.01, color='red', zorder=2)
        plt.scatter([0], [0], marker='+', color='k', s=1000)
        for c in clusters:
            plt.plot(xx[c], yy[c], zorder=2)
        for l, cog, r in zip(is_legs, cogs, radii):
            if l:
                patch = patches.Circle(cog, r, facecolor=(0, 0, 0, 0),
                                       edgecolor=(0, 0, 0, 1), linewidth=3)
            else:
                patch = patches.Circle(cog, r, facecolor=(0, 0, 0, 0),
                                       edgecolor=(0, 0, 0, 1), linestyle='--')
            plt.gca().add_artist(patch)
        if tf_past_in_rob:
            xy_past = np.array(tf_past_in_rob)
            plt.plot(xy_past[:, 0], xy_past[:, 1], color='red')
        plt.gca().set_aspect('equal', adjustable='box')
        LIM = 10
        plt.ylim([-LIM, LIM])
        plt.xlim([-LIM, LIM])
        # pause
        plt.pause(0.1)
        rospy.timer.sleep(0.01)
def _update_dist_travelled(self):
    """ update dist travel var used for animating legs """
    # for each human, get vel in base frame
    for i, human in enumerate(self.soadrl_sim.humans):
        # dig up rotational velocity from past states log
        vrot = 0.
        if len(self.soadrl_sim.states) > 1:
            vrot = (self.soadrl_sim.states[-1][1][i].theta
                    - self.soadrl_sim.states[-2][1][i].theta) / self._get_dt()
        # transform world vel to base vel
        baselink_in_world = np.array([human.px, human.py, human.theta])
        world_in_baselink = inverse_pose2d(baselink_in_world)
        vel_in_world_frame = np.array([human.vx, human.vy, vrot])
        vel_in_baselink_frame = apply_tf_to_vel(vel_in_world_frame, world_in_baselink)
        self.distances_travelled_in_base_frame[i, :] += vel_in_baselink_frame * self._get_dt()
def execute_current_task(self, agent_index):
    # TODO run a task-specific controller instead
    task = self.current_tasks[agent_index]
    task_start = self.task_start_times[agent_index]
    if task is None or task_start is None:
        return np.array([0, 0, 0])
    # get action from current task
    N = int(NAIVE_AGENT_LIVE_WAYPOINT_DISTANCE / self.iaenv.worldstate.map.resolution())
    waypoint_idx = min(task['tasktargetpathidx'], N)
    waypoint = task['taskfullpath'][waypoint_idx]
    vp_pos = self.rlenv.virtual_peppers[agent_index].pos
    waypoint_in_vp_frame = pose2d.apply_tf(waypoint, pose2d.inverse_pose2d(vp_pos))
    theta = np.arctan2(waypoint_in_vp_frame[1], waypoint_in_vp_frame[0])
    delta = np.array([waypoint_in_vp_frame[0], waypoint_in_vp_frame[1], theta])
    action = delta
    # apply actions to iaenv, such as SAY
    # TODO
    # end task
    task_elapsed = self.get_sim_time() - task_start
    if task_elapsed > self.get_max_task_alotted_time():
        self.reset_task(agent_index)
        if self.args.verbose:
            print("{:.1f} | {}: Agent {} | task timed-out".format(
                self.get_walltime_since_sim_start_sec(),
                self.get_sim_time_sec(),
                agent_index))
        # reset blocked collision episodes for agent (new chance to be allowed through)
        for j in range(self.n_sim_agents):
            if agent_index == j:
                continue
            if self.collision_episodes[agent_index, j] == 1:
                self.collision_episodes[agent_index, j] = 0
                self.collision_episodes[j, agent_index] = 0
        # we should stop, but I kind of like the idle movement. just slow down.
        if isinstance(task['taskaction'], actions.Loiter):
            action = action * 0.8
    else:
        # check if task succeeded
        if np.linalg.norm(vp_pos[:2] - waypoint) < self.get_agent_radius(agent_index):
            self.reset_task(agent_index)
            action = np.array([0, 0, 0])
            if self.args.verbose:
                print("Agent {} | task succeeded".format(agent_index))
    return action
def turn_to_face_goal_callback(self, event=None):
    with self.lock:
        if self.plan_executor_node.task_being_executed is not None:
            return
        if self.refmap_manager.tf_frame_in_refmap is None:
            return
        if self.goal_xy_in_refmap is None:
            return
        p2_refmap_in_robot = inverse_pose2d(Pose2D(self.refmap_manager.tf_frame_in_refmap))
        goal_xy_in_robot = apply_tf(self.goal_xy_in_refmap, p2_refmap_in_robot)
        rospy.loginfo_throttle(5., "turning to face goal")
        gx, gy = goal_xy_in_robot
        angle_to_goal = np.arctan2(gy, gx)  # [-pi, pi]
        w = 0
        if np.abs(angle_to_goal) > ((np.pi / 4) / 10):  # deadzone
            w = 0.3 * np.sign(angle_to_goal)
        if not self.STOP:
            cmd_vel_msg = Twist()
            cmd_vel_msg.angular.z = w
            self.cmd_vel_pub.publish(cmd_vel_msg)
def planner_routine(self, event=None):
    nowstamp = rospy.Time.now()
    if self.tf_wpt_in_fix is None:
        rospy.logwarn_throttle(10., "Global path not available yet")
        return
    if self.tf_rob_in_fix is None:
        rospy.logwarn_throttle(10., "tf_rob_in_fix not available yet")
        return
    if self.odom is None:
        rospy.logwarn_throttle(10., "odom not available yet")
        return
    # remove old obstacles from buffer
    k2s = 2.
    with self.lock:
        tic = timer()
        dynamic_obstacles = []
        self.obstacles_buffer = [
            obst_msg for obst_msg in self.obstacles_buffer
            if nowstamp - obst_msg.header.stamp < rospy.Duration(k2s)
        ]
        # DEBUG remove
        # for obst_msg in self.obstacles_buffer:
        for obst_msg in self.obstacles_buffer[-1:]:
            dynamic_obstacles.extend(obst_msg.obstacles)
        # take into account static obstacles if not old
        static_obstacles = []
        if self.static_obstacles_buffer is not None:
            if (nowstamp - self.static_obstacles_buffer.header.stamp) < rospy.Duration(k2s):
                static_obstacles = self.static_obstacles_buffer.obstacles
        # vel to goal in fix
        goal_in_fix = np.array(Pose2D(self.tf_wpt_in_fix)[:2])
        toc = timer()
        print("Lock preproc {:.1f} ms".format((toc - tic) * 1000.))
        # robot position
        tf_rob_in_fix = self.tf_rob_in_fix
    tic = timer()
    # RVO set up
    positions = []
    radii = []
    velocities = []
    obstacles = []
    metadata = []  # agent metadata
    # create agents for RVO
    # first agent is the robot
    positions.append(Pose2D(tf_rob_in_fix)[:2])
    radii.append(self.kRadius)
    velocities.append([self.odom.twist.twist.linear.x, self.odom.twist.twist.linear.y])
    metadata.append({'source': 'robot'})
    # create agent for every dynamic obstacle
    for obst in dynamic_obstacles:
        positions.append([obst.polygon.points[0].x, obst.polygon.points[0].y])
        radii.append(obst.radius)
        velocities.append([obst.velocities.twist.linear.x, obst.velocities.twist.linear.y])
        metadata.append({'source': 'dynamic'})
    # create agents for local static obstacles (table leg, chair, etc)
    for obst in static_obstacles:
        if obst.radius > 0.5:
            continue  # TODO big walls make big circles : but dangerous to do this!
        positions.append([obst.polygon.points[0].x, obst.polygon.points[0].y])
        radii.append(obst.radius)
        velocities.append([obst.velocities.twist.linear.x, obst.velocities.twist.linear.y])
        metadata.append({'source': 'nonleg'})
    # create static obstacle vertices from refmap if available (walls, etc)
    if self.refmap_manager.tf_frame_in_refmap is not None:
        kMapObstMaxDist = 3.
        obstacles_in_ref = self.refmap_manager.map_as_closed_obstacles
        pose2d_ref_in_fix = inverse_pose2d(Pose2D(self.refmap_manager.tf_frame_in_refmap))
        near_obstacles = [apply_tf(vertices, pose2d_ref_in_fix)
                          for vertices in obstacles_in_ref if len(vertices) > 1]
        near_obstacles = [
            vertices for vertices in near_obstacles
            if np.mean(np.linalg.norm(vertices - np.array(positions[0]), axis=1)) < kMapObstMaxDist
        ]
        obstacles = near_obstacles
    toc = timer()
    print("Pre-RVO {:.1f} ms".format((toc - tic) * 1000.))
    tic = timer()
    # RVO ----------------
    import rvo2
    t_horizon = 10.
    DT = 1 / 10.
    # RVOSimulator(timeStep, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst,
    #              radius, maxSpeed, velocity=[0, 0])
    sim = rvo2.PyRVOSimulator(DT, 1.5, 5, 1.5, 2, 0.4, 2)
    # Pass either just the position (the other parameters then use
    # the default values passed to the PyRVOSimulator constructor),
    # or pass all available parameters.
    agents = []
    for p, v, r, m in zip(positions, velocities, radii, metadata):
        # addAgent(posxy, neighborDist, maxNeighbors, timeHorizon, timeHorizonObst,
        #          radius, maxSpeed, velocity=[0, 0])
        a = sim.addAgent(tuple(p), radius=r, velocity=tuple(v))
        agents.append(a)
        # static agents can't move
        if m['source'] != 'robot' and np.linalg.norm(v) == 0:
            sim.setAgentMaxSpeed(a, 0)
    # Obstacle (list of vertices), anticlockwise (clockwise for bounding obstacle)
    for vert in obstacles:
        o1 = sim.addObstacle(list(vert))
    sim.processObstacles()
    toc = timer()
    print("RVO init {:.1f} ms".format((toc - tic) * 1000.))
    tic = timer()
    n_steps = int(t_horizon / DT)
    pred_tracks = [[] for a in agents]
    pred_vels = [[] for a in agents]
    pred_t = []
    for step in range(n_steps):
        for i in range(len(agents)):
            # TODO: early stop if agent 0 reaches goal
            a = agents[i]
            pref_vel = velocities[i]  # assume agents want to keep initial vel
            if i == 0:
                vector_to_goal = goal_in_fix - np.array(sim.getAgentPosition(a))
                pref_vel = self.kIdealVelocity * vector_to_goal / np.linalg.norm(vector_to_goal)
            sim.setAgentPrefVelocity(a, tuple(pref_vel))
            pred_tracks[i].append(sim.getAgentPosition(a))
            pred_vels[i].append(sim.getAgentVelocity(a))
        pred_t.append(1. * step * DT)
        sim.doStep()
    # -------------------------
    toc = timer()
    print("RVO steps {} - {} agents - {} obstacles".format(step, len(agents), len(obstacles)))
    print("RVO sim {:.1f} ms".format((toc - tic) * 1000.))
    # PLOT ---------------------------
    # self.transport_data = (sim, agents, metadata, radii, obstacles, pred_tracks)
    # -----------------------------
    # publish predicted tracks as paths
    rob_track = pred_tracks[0]
    pub = rospy.Publisher("/rvo_robot_plan", Path, queue_size=1)
    path_msg = Path()
    path_msg.header.stamp = nowstamp
    path_msg.header.frame_id = self.kFixedFrame
    for pose, t in zip(rob_track, pred_t):
        pose_msg = PoseStamped()
        pose_msg.header.stamp = nowstamp + rospy.Duration(t)
        pose_msg.header.frame_id = path_msg.header.frame_id
        pose_msg.pose.position.x = pose[0]
        pose_msg.pose.position.y = pose[1]
        path_msg.poses.append(pose_msg)
    pub.publish(path_msg)
    # publish tracks
    pub = rospy.Publisher("/rvo_simulated_tracks", MarkerArray, queue_size=1)
    ma = MarkerArray()
    id_ = 0
    end_x = [sim.getAgentPosition(agent_no)[0] for agent_no in agents]
    end_y = [sim.getAgentPosition(agent_no)[1] for agent_no in agents]
    end_t = pred_t[-1]
    for i in range(len(agents)):
        color = (0, 0, 0, 1)  # black
        color = (1, .8, 0, 1) if metadata[i]['source'] == 'robot' else color  # yellow
        color = (1, 0, 0, 1) if metadata[i]['source'] == 'dynamic' else color  # red
        color = (0, 0, 1, 1) if metadata[i]['source'] == 'nonleg' else color  # blue
        # track line
        mk = Marker()
        mk.lifetime = rospy.Duration(0.1)
        mk.header.frame_id = self.kFixedFrame
        mk.ns = "tracks"
        mk.id = i
        mk.type = 4  # LINE_STRIP
        mk.action = 0
        mk.scale.x = 0.02
        mk.color.r = color[0]
        mk.color.g = color[1]
        mk.color.b = color[2]
        mk.color.a = color[3]
        mk.frame_locked = True
        xx = np.array(pred_tracks[i])[:, 0]
        yy = np.array(pred_tracks[i])[:, 1]
        for x, y, t in zip(xx, yy, pred_t):
            pt = Point()
            pt.x = x
            pt.y = y
            pt.z = t / t_horizon
            mk.points.append(pt)
        ma.markers.append(mk)
        # endpoint
        r = radii[i]
        mk = Marker()
        mk.lifetime = rospy.Duration(0.1)
        mk.header.frame_id = self.kFixedFrame
        mk.ns = "endpoints"
        mk.id = i
        mk.type = 3  # CYLINDER
        mk.action = 0
        mk.scale.x = r * 2.
        mk.scale.y = r * 2.
        mk.scale.z = 0.1
        mk.color.r = color[0]
        mk.color.g = color[1]
        mk.color.b = color[2]
        mk.color.a = color[3]
        mk.frame_locked = True
        mk.pose.position.x = end_x[i]
        mk.pose.position.y = end_y[i]
        mk.pose.position.z = end_t / t_horizon
        ma.markers.append(mk)
    pub.publish(ma)
    # index 1 is a cheat: index 0 is usually just the current speed
    pred_rob_vel_in_fix = np.array(pred_vels[0][1])
    pred_end_in_fix = np.array([end_x[0], end_y[0]])
    # in robot frame
    pose2d_fix_in_rob = inverse_pose2d(Pose2D(tf_rob_in_fix))
    pred_rob_vel_in_rob = apply_tf_to_vel(np.array(list(pred_rob_vel_in_fix) + [0]),
                                          pose2d_fix_in_rob)[:2]
    pred_end_in_rob = apply_tf(pred_end_in_fix, pose2d_fix_in_rob)
    # resulting command vel, and path
    best_u, best_v = pred_rob_vel_in_rob
    # check if goal is reached
    if np.linalg.norm(pred_end_in_rob) < 0.1:
        best_u, best_v = (0, 0)
    # Slow turn towards goal
    # TODO
    best_w = 0
    WMAX = 0.5
    gx, gy = pred_end_in_rob
    angle_to_goal = np.arctan2(gy, gx)  # [-pi, pi]
    if np.sqrt(gx * gx + gy * gy) > 0.5:  # turn only if goal is far away
        if np.abs(angle_to_goal) > (np.pi / 4 / 10):  # deadzone
            best_w = np.clip(angle_to_goal, -WMAX, WMAX)  # linear ramp
    cmd_vel_pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
    if not self.STOP:
        # publish cmd_vel
        cmd_vel_msg = Twist()
        cmd_vel_msg.linear.x = best_u
        cmd_vel_msg.linear.y = best_v
        cmd_vel_msg.angular.z = best_w
        cmd_vel_pub.publish(cmd_vel_msg)
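# NOTE (illustrative sketch, not part of the original file): a minimal standalone
# Python-RVO2 rollout in the same style as planner_routine above. Constructor
# arguments are (timeStep, neighborDist, maxNeighbors, timeHorizon,
# timeHorizonObst, radius, maxSpeed); the numeric values, the pedestrian, and
# the 5 m goal below are arbitrary example choices.
import numpy as np
import rvo2

sim = rvo2.PyRVOSimulator(0.1, 1.5, 5, 1.5, 2, 0.4, 2)
robot = sim.addAgent((0., 0.))            # agent 0: the robot
person = sim.addAgent((3., 0.3))          # agent 1: an oncoming pedestrian
sim.setAgentPrefVelocity(person, (-1., 0.))
goal = np.array([5., 0.])
track = []
for step in range(100):
    pos = np.array(sim.getAgentPosition(robot))
    to_goal = goal - pos
    if np.linalg.norm(to_goal) < 0.1:     # early stop once the goal is reached
        break
    pref_vel = 0.9 * to_goal / np.linalg.norm(to_goal)
    sim.setAgentPrefVelocity(robot, tuple(pref_vel))
    sim.doStep()
    track.append(sim.getAgentPosition(robot))
# planner_routine extracts a cmd_vel from the first predicted velocities of such
# a rollout (pred_vels[0][1]) rather than executing the whole simulated track.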
def render(self, mode='human', close=False, RENDER_LIDAR=True, lidar_scan_override=None,
           goal_override=None, save_to_file=False, show_score=False, robocentric=False):
    if close:
        if self.viewer is not None:
            self.viewer.close()
        return
    if self.lidar_scan is None:  # check that reset has been called
        return
    if mode == 'traj':
        self.soadrl_sim.render('traj')
    elif mode in ['human', 'rings']:
        # Window and viewport size
        WINDOW_W = 256
        WINDOW_H = 256
        VP_W = WINDOW_W
        VP_H = WINDOW_H
        from gym.envs.classic_control import rendering
        import pyglet
        from pyglet import gl
        # Create viewer
        if self.viewer is None:
            self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
            self.transform = rendering.Transform()
            self.transform.set_scale(10, 10)
            self.transform.set_translation(128, 128)
            self.score_label = pyglet.text.Label(
                '0000', font_size=12,
                x=20, y=WINDOW_H * 2.5 / 40.00, anchor_x='left', anchor_y='center',
                color=(255, 255, 255, 255))
            # self.transform = rendering.Transform()
            self.currently_rendering_iteration = 0
            self.image_lock = threading.Lock()

        def make_circle(c, r, res=10):
            thetas = np.linspace(0, 2 * np.pi, res + 1)[:-1]
            verts = np.zeros((res, 2))
            verts[:, 0] = c[0] + r * np.cos(thetas)
            verts[:, 1] = c[1] + r * np.sin(thetas)
            return verts
        # Render in pyglet
        with self.image_lock:
            self.currently_rendering_iteration += 1
            self.viewer.draw_circle(r=10, color=(0.3, 0.3, 0.3))
            win = self.viewer.window
            win.switch_to()
            win.dispatch_events()
            win.clear()
            gl.glViewport(0, 0, VP_W, VP_H)
            # colors
            bgcolor = np.array([0.4, 0.8, 0.4])
            obstcolor = np.array([0.3, 0.3, 0.3])
            goalcolor = np.array([1., 1., 0.3])
            goallinecolor = 0.9 * bgcolor
            nosecolor = np.array([0.3, 0.3, 0.3])
            lidarcolor = np.array([1., 0., 0.])
            agentcolor = np.array([0., 1., 1.])
            # Green background
            gl.glBegin(gl.GL_QUADS)
            gl.glColor4f(bgcolor[0], bgcolor[1], bgcolor[2], 1.0)
            gl.glVertex3f(0, VP_H, 0)
            gl.glVertex3f(VP_W, VP_H, 0)
            gl.glVertex3f(VP_W, 0, 0)
            gl.glVertex3f(0, 0, 0)
            gl.glEnd()
            # Transform
            rx = self.soadrl_sim.robot.px
            ry = self.soadrl_sim.robot.py
            rth = self.soadrl_sim.robot.theta
            if robocentric:
                # sets viewport = robocentric a.k.a T_sim_in_viewport = T_sim_in_robocentric
                from pose2d import inverse_pose2d
                T_sim_in_robot = inverse_pose2d(np.array([rx, ry, rth]))
                # T_robot_in_robocentric is trans(128, 128), scale(10), rot(90deg)
                # T_sim_in_robocentric = T_sim_in_robot * T_robot_in_robocentric
                rot = np.pi / 2.
                scale = 20
                trans = (WINDOW_W / 2., WINDOW_H / 2.)
                T_sim_in_robocentric = [
                    trans[0] + scale * (T_sim_in_robot[0] * np.cos(rot)
                                        - T_sim_in_robot[1] * np.sin(rot)),
                    trans[1] + scale * (T_sim_in_robot[0] * np.sin(rot)
                                        + T_sim_in_robot[1] * np.cos(rot)),
                    T_sim_in_robot[2] + rot,
                ]
                self.transform.set_translation(T_sim_in_robocentric[0], T_sim_in_robocentric[1])
                self.transform.set_rotation(T_sim_in_robocentric[2])
                self.transform.set_scale(scale, scale)
                # self.transform.set_scale(20, 20)
            self.transform.enable()  # applies T_sim_in_viewport to below coords (all in sim frame)
            # Map closed obstacles ---
            for poly in self.soadrl_sim.obstacle_vertices:
                gl.glBegin(gl.GL_LINE_LOOP)
                gl.glColor4f(obstcolor[0], obstcolor[1], obstcolor[2], 1)
                for vert in poly:
                    gl.glVertex3f(vert[0], vert[1], 0)
                gl.glEnd()
            # LIDAR
            if RENDER_LIDAR:
                px = self.soadrl_sim.robot.px
                py = self.soadrl_sim.robot.py
                angle = self.soadrl_sim.robot.theta
                # LIDAR rays
                scan = lidar_scan_override
                if scan is None:
                    scan = self.lidar_scan
                lidar_angles = self.lidar_angles
                x_ray_ends = px + scan * np.cos(lidar_angles)
                y_ray_ends = py + scan * np.sin(lidar_angles)
                is_in_fov = np.cos(lidar_angles - angle) >= 0.78
                for ray_idx in range(len(scan)):
                    end_x = x_ray_ends[ray_idx]
                    end_y = y_ray_ends[ray_idx]
                    gl.glBegin(gl.GL_LINE_LOOP)
                    if is_in_fov[ray_idx]:
                        gl.glColor4f(1., 1., 0., 0.1)
                    else:
                        gl.glColor4f(lidarcolor[0], lidarcolor[1], lidarcolor[2], 0.1)
                    gl.glVertex3f(px, py, 0)
                    gl.glVertex3f(end_x, end_y, 0)
                    gl.glEnd()
            # Agent body
            for n, agent in enumerate([self.soadrl_sim.robot] + self.soadrl_sim.humans):
                px = agent.px
                py = agent.py
                angle = agent.theta
                r = agent.radius
                # Agent as Circle
                poly = make_circle((px, py), r)
                gl.glBegin(gl.GL_POLYGON)
                if n == 0:
                    color = np.array([1., 1., 1.])
                else:
                    color = agentcolor
                gl.glColor4f(color[0], color[1], color[2], 1)
                for vert in poly:
                    gl.glVertex3f(vert[0], vert[1], 0)
                gl.glEnd()
                # Direction triangle
                xnose = px + r * np.cos(angle)
                ynose = py + r * np.sin(angle)
                xright = px + 0.3 * r * -np.sin(angle)
                yright = py + 0.3 * r * np.cos(angle)
                xleft = px - 0.3 * r * -np.sin(angle)
                yleft = py - 0.3 * r * np.cos(angle)
                gl.glBegin(gl.GL_TRIANGLES)
                gl.glColor4f(nosecolor[0], nosecolor[1], nosecolor[2], 1)
                gl.glVertex3f(xnose, ynose, 0)
                gl.glVertex3f(xright, yright, 0)
                gl.glVertex3f(xleft, yleft, 0)
                gl.glEnd()
            # Goal
            xgoal = self.soadrl_sim.robot.gx
            ygoal = self.soadrl_sim.robot.gy
            r = self.soadrl_sim.robot.radius
            if goal_override is not None:
                xgoal, ygoal = goal_override
            # Goal markers
            gl.glBegin(gl.GL_TRIANGLES)
            gl.glColor4f(goalcolor[0], goalcolor[1], goalcolor[2], 1)
            triangle = make_circle((xgoal, ygoal), r, res=3)
            for vert in triangle:
                gl.glVertex3f(vert[0], vert[1], 0)
            gl.glEnd()
            # Goal line
            gl.glBegin(gl.GL_LINE_LOOP)
            gl.glColor4f(goallinecolor[0], goallinecolor[1], goallinecolor[2], 1)
            gl.glVertex3f(rx, ry, 0)
            gl.glVertex3f(xgoal, ygoal, 0)
            gl.glEnd()
            # --
            self.transform.disable()
            # Text
            self.score_label.text = ""
            if show_score:
                self.score_label.text = "R {}".format(self.episode_reward)
            self.score_label.draw()
            win.flip()
            if save_to_file:
                pyglet.image.get_buffer_manager().get_color_buffer().save(
                    "/tmp/navreptrainenv{:05}.png".format(self.total_steps))
            return self.viewer.isopen
    else:
        raise NotImplementedError
def scan_callback(self, msg):
    atic = timer()
    # edge case: first callback
    if self.msg_prev is None:
        self.msg_prev = msg
        return
    if self.odom is None:
        print("odom not received yet")
        return
    if self.tf_rob_in_fix is None:
        print("tf_rob_in_fix not found yet")
        return
    if self.tf_goal_in_fix is None:
        self.tf_goal_in_fix = self.tf_rob_in_fix
        print("goal set")
    # TODO check that odom and tf are not old
    # measure rotation TODO
    s = np.array(msg.ranges)
    # prediction
    dt = (msg.header.stamp - self.msg_prev.header.stamp).to_sec()
    s_prev = np.array(self.msg_prev.ranges)
    ds = (s - s_prev)
    max_ds = self.kMaxObstacleVel_ms * dt
    ds_capped = ds
    ds_capped[np.abs(ds) > max_ds] = 0
    s_next = np.maximum(0, s + ds_capped).astype(np.float32)
    # cluster
    EUCLIDEAN_CLUSTERING_THRESH_M = 0.05
    angles = np.linspace(0, 2 * np.pi, s_next.shape[0] + 1, dtype=np.float32)[:-1]
    clusters, x, y = clustering.euclidean_clustering(s_next, angles,
                                                     EUCLIDEAN_CLUSTERING_THRESH_M)
    cluster_sizes = clustering.cluster_sizes(len(s_next), clusters)
    s_next[cluster_sizes <= 3] = 25
    # dwa
    # Get state
    # goal in robot frame
    goal_in_robot_frame = apply_tf_to_pose(Pose2D(self.tf_goal_in_fix),
                                           inverse_pose2d(Pose2D(self.tf_rob_in_fix)))
    gx = goal_in_robot_frame[0]
    gy = goal_in_robot_frame[1]
    # robot speed in robot frame
    u = self.odom.twist.twist.linear.x
    v = self.odom.twist.twist.linear.y
    w = self.odom.twist.twist.angular.z
    DWA_DT = 0.5
    COMFORT_RADIUS_M = self.kRobotComfortRadius_m
    MAX_XY_VEL = 0.5
    tic = timer()
    best_u, best_v, best_score = dynamic_window.linear_dwa(
        s_next,
        angles,
        u, v, gx, gy,
        DWA_DT,
        DV=0.05,
        UMIN=0 if self.args.forward_only else -MAX_XY_VEL,
        UMAX=MAX_XY_VEL,
        VMIN=-MAX_XY_VEL,
        VMAX=MAX_XY_VEL,
        AMAX=10.,
        COMFORT_RADIUS_M=COMFORT_RADIUS_M,
    )
    toc = timer()
    # print(best_u * DWA_DT, best_v * DWA_DT, best_score)
    # print("DWA: {:.2f} Hz".format(1/(toc-tic)))
    # Slow turn towards goal
    # TODO
    best_w = 0
    WMAX = 0.5
    angle_to_goal = np.arctan2(gy, gx)  # [-pi, pi]
    if np.sqrt(gx * gx + gy * gy) > 0.5:  # turn only if goal is far away
        if np.abs(angle_to_goal) > (np.pi / 4 / 10):  # deadzone
            best_w = np.clip(angle_to_goal, -WMAX, WMAX)  # linear ramp
    if not self.STOP:
        # publish cmd_vel
        cmd_vel_msg = Twist()
        SIDEWAYS_FACTOR = 0.3 if self.args.forward_only else 1.
        cmd_vel_msg.linear.x = np.clip(best_u * 0.5, -0.3, 0.3)
        cmd_vel_msg.linear.y = np.clip(best_v * 0.5 * SIDEWAYS_FACTOR, -0.3, 0.3)
        cmd_vel_msg.angular.z = best_w
        self.cmd_vel_pub.publish(cmd_vel_msg)
    # publish cmd_vel vis
    pub = rospy.Publisher("/dwa_cmd_vel", Marker, queue_size=1)
    mk = Marker()
    mk.header.frame_id = self.kRobotFrame
    mk.ns = "arrows"
    mk.id = 0
    mk.type = 0  # ARROW
    mk.action = 0
    mk.scale.x = 0.02
    mk.scale.y = 0.02
    mk.color.b = 1
    mk.color.a = 1
    mk.frame_locked = True
    pt = Point()
    pt.x = 0
    pt.y = 0
    pt.z = 0.03
    mk.points.append(pt)
    pt = Point()
    pt.x = 0 + best_u * DWA_DT
    pt.y = 0 + best_v * DWA_DT
    pt.z = 0.03
    mk.points.append(pt)
    pub.publish(mk)
    pub = rospy.Publisher("/dwa_goal", Marker, queue_size=1)
    mk = Marker()
    mk.header.frame_id = self.kRobotFrame
    mk.ns = "arrows"
    mk.id = 0
    mk.type = 0  # ARROW
    mk.action = 0
    mk.scale.x = 0.02
    mk.scale.y = 0.02
    mk.color.g = 1
    mk.color.a = 1
    mk.frame_locked = True
    pt = Point()
    pt.x = 0
    pt.y = 0
    pt.z = 0.03
    mk.points.append(pt)
    pt = Point()
    pt.x = 0 + gx
    pt.y = 0 + gy
    pt.z = 0.03
    mk.points.append(pt)
    pub.publish(mk)
    pub = rospy.Publisher("/dwa_radius", Marker, queue_size=1)
    mk = Marker()
    mk.header.frame_id = self.kRobotFrame
    mk.ns = "radius"
    mk.id = 0
    mk.type = 3  # CYLINDER
    mk.action = 0
    mk.pose.position.z = -0.1
    mk.scale.x = COMFORT_RADIUS_M * 2
    mk.scale.y = COMFORT_RADIUS_M * 2
    mk.scale.z = 0.01
    mk.color.b = 1
    mk.color.g = 1
    mk.color.r = 1
    mk.color.a = 1
    mk.frame_locked = True
    pub.publish(mk)
    # publish scan prediction
    msg_next = deepcopy(msg)
    msg_next.ranges = s_next
    # for pretty colors
    cluster_ids = clustering.cluster_ids(len(x), clusters)
    random_map = np.arange(len(cluster_ids))
    np.random.shuffle(random_map)
    cluster_ids = random_map[cluster_ids]
    msg_next.intensities = cluster_ids
    self.pubs[0].publish(msg_next)
    # publish past
    msg_prev = deepcopy(msg)
    msg_prev.ranges = self.msg_prev.ranges
    self.pubs[1].publish(msg_prev)
    # ...
    # finally, set up for next callback
    self.msg_prev = msg
    atoc = timer()
    if self.args.hz:
        print("DWA callback: {:.2f} Hz".format(1 / (atoc - atic)))
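# NOTE (illustrative sketch, not part of the original file): tiny worked example
# of the per-beam range-rate capping used in scan_callback to build s_next.
# Range jumps faster than kMaxObstacleVel_ms (assumed 5 m/s here) are treated as
# outliers and not extrapolated into the predicted scan.
import numpy as np
s_prev = np.array([2.0, 2.0, 5.0])
s = np.array([1.9, 1.0, 5.0])      # beam 1 jumped 1 m in 0.1 s -> implausible
dt = 0.1
max_ds = 5.0 * dt                  # kMaxObstacleVel_ms * dt
ds = s - s_prev                    # [-0.1, -1.0, 0.0]
ds[np.abs(ds) > max_ds] = 0        # -> [-0.1, 0.0, 0.0]
s_next = np.maximum(0, s + ds)     # predicted next scan: [1.8, 1.0, 5.0]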