def extract_nonplayer_transform(npmes, include_people=False):
    """Return the transform of a non-player agent, or None if no usable transform exists."""
    if npmes.HasField('vehicle'):
        return transform.Transform(npmes.vehicle.transform)
    elif npmes.HasField('pedestrian') and include_people:
        return transform.Transform(npmes.pedestrian.transform)
    else:
        return None
def get_rectifying_player_transform(player_measurements):
    """Build transform to correct for pitch or roll of the car."""
    ptx = player_measurements.transform
    p_rectify = transform.Transform()
    pT = Translation(0, 0, 0)
    # Undo the pitch and roll (not yaw!).
    pR = Rotation(-ptx.rotation.pitch, 0, -ptx.rotation.roll)
    # pR = Rotation(ptx.rotation.pitch, 0, ptx.rotation.roll)
    p_rectify.set(pT, pR)
    return p_rectify
def run_episode(client, streaming_loader, model, phi, future, midlow_controller,
                car_pid_controllers, plottable_manager, waypointerconf,
                episode_params, metrics, fig, axes, cfg):
    dimconf = cfg.dim
    mainconf = cfg.main
    dataconf = cfg.data
    plotconf = cfg.plotting
    trackersconf = cfg.trackers
    expconf = cfg.experiment
    waypointerconf = cfg.waypointer
    del cfg

    # Build dirs.
    directory, plot_dir, dim_feeds_dir = set_up_directories(
        episode_params, cfg=plotconf, mainconf=mainconf, dataconf=dataconf)

    # Dump some objects.
    save_metadata(episode_params=episode_params, directory=directory, metrics=metrics)

    # Print the metrics.
    if episode_params.episode > 0:
        log.info("Current overall metrics: {}".format(metrics))

    # Randomize the environment.
    episode_params.settings.randomize_seeds()

    if midlow_controller is not None:
        midlow_controller.reset()

    if model is not None:
        assert(phi is model.phi)

    # Now we load these settings into the server. The server replies
    # with a scene description containing the available start spots for
    # the player. Here we can provide a CarlaSettings object or a
    # CarlaSettings.ini file as string.
    log.info("Loading the settings into the server...")
    scene = client.load_settings(episode_params.settings)

    # Hang out?
    time.sleep(0.01)

    # Notify the server that we want to start the episode at the
    # player_start index. This function blocks until the server is ready
    # to start the episode.
    client.start_episode(choose_player_start(scene))

    have_specific_episodes = len(expconf.specific_episodes) > 0
    if not have_specific_episodes:
        # Possibly skip the episode.
        if episode_params.episode < expconf.skip_to:
            log.warning("SKIPPING EPISODE {}\n".format(episode_params.episode) * 10)
            return
    else:
        log.info("Filtering episodes to '{}'".format(expconf.specific_episodes))
        if episode_params.episode not in expconf.specific_episodes:
            log.info("{} not in specific episodes".format(episode_params.episode))
            return

    # Reset the plotting.
    if plotconf.plot:
        _ = [ax.cla() for ax in axes.ravel()]
    plot_state = (cplot.PlotState.from_plottables(plotconf, plottable_manager.base_plottables)
                  if plotconf.plot else None)

    # Hang out for a bit, see https://github.com/carla-simulator/carla/issues/263
    time.sleep(4)

    # Collect the recent measurements and controls.
    measurement_buffer = collections.deque(maxlen=dataconf.measurement_buffer_length)

    # Instantiate the waypointer.
    waypointer = Waypointer(waypointerconf=waypointerconf, planner_or_map_name=expconf.scene)

    # For now, just track whether the robot is stationary.
    stop_tracker = agent_util.StationaryTracker(
        size=trackersconf.stationary_window_size,
        thresh=trackersconf.stationary_threshold,
        track_others=False)

    # Use this to detect if the agent is stationary longer than it should be
    # (e.g. 100 frames -> 10 seconds). Should be beyond the max light time and
    # before the final timeout time (stop tracker).
    stuck_tracker = agent_util.NonredStuckTracker(
        waypointer=waypointer,
        size=trackersconf.stuck_window_size,
        thresh=trackersconf.stationary_threshold)

    # If in the past N frames the car's pose has changed by more than M degrees,
    # then it's executing a turn.
    turn_tracker = agent_util.TurnTracker(
        size=trackersconf.turn_tracker_frames,
        thresh=trackersconf.turn_tracker_thresh)

    # Reset the controllers.
    car_pid_controllers.reset()

    # Initially not collided.
    sum_collision_impulse = 0

    # ---------------------------------------
    # TODO incorporate pasts of other agents!
    # ---------------------------------------
    A_past = 5
    log.warning("Defaulting to a hardcoded A_past of {}".format(A_past))

    # Ensure all previous episodes were properly concluded.
    for em in metrics.all_episode_metrics:
        assert(em.concluded)
    log.info("Current number of successes and failures: {}, {}".format(
        metrics.n_successes, metrics.n_failures))

    # Create a new episode metric.
    metrics.begin_new_episode(episode_params)
    waypointerconf.have_planned = False

    # Step through the simulation.
    for frame in range(0, episode_params.frames_per_episode):
        log.debug("On frame {:06d}".format(frame))
        if frame == episode_params.frames_per_episode - 1:
            summary = "Ran out of episode frames! Treating as failure"
            log.warning(summary)
            metrics.conclude_episode(success=False, summary=summary)
            with open(episode_params.root_dir + "/metrics.dill".format(episode_params.episode), 'wb') as f:
                dill.dump(metrics, f)
            break

        have_control = frame > expconf.min_takeover_frames

        # -----------------------
        # Read the data produced by the server this frame. Record measurements.
        # -----------------------
        measurement, sensor_data = client.read_data()
        measurement_buffer.append(measurement)
        sensor_data = attrdict.AttrDict(sensor_data)
        # Prevent sensor data mutation.
        agent_util.lock_observations(sensor_data)
        transform_now = transform.Transform(measurement.player_measurements.transform).inverse()

        if mainconf.pilot != 'auto':
            waypoints, waypoints_local = waypointer.get_waypoints_from_transform_and_measurement(
                transform_now, measurement)
            if waypoints is None or waypoints_local is None:
                summary = "Waypointer error! Episode failed"
                log.error(summary)
                metrics.conclude_episode(success=False, summary=summary)
                break
        else:
            waypoints, waypoints_local = None, None

        # Some measurement-only computations.
        if have_control:
            # Index the measurement to check if we've stopped.
            stop_tracker.index_new_measurement(measurement)
            stuck_tracker.index_new_measurement(measurement)
            stationary, stuck = stuck_tracker.is_stuck_far_from_red_lights(
                ignore_reds=waypointerconf.stuck_ignore_reds)
            if stuck:
                log.warning("Determined that the vehicle is stuck not near a red light!")
        else:
            stationary, stuck = False, False
        turn_tracker.index_new_measurement(measurement)

        # -----------------------
        # Instantiate the object that represents all observations at the current time.
        # -----------------------
        current_obs = preproc.PlayerObservations(
            measurement_buffer, t_index=-1, radius=200, A=A_past,
            waypointer=waypointer, frame=frame)

        # Store some tracking data in the observation.
        current_obs.is_turning = turn_tracker.is_turning()
        current_obs.is_stuck = stuck
        current_obs.is_stationary = stationary
        transform_now = current_obs.inv_tform_t  # world2local

        if frame == 0:
            start_obs = current_obs

        log.debug("Episode={}, Current frame={}".format(episode_params.episode, current_obs.frame))

        # Populate extra metrics.
        extra_metrics = {}
        traffic_light_state, traffic_light_data = waypointer.get_upcoming_traffic_light(
            measurement, sensor_data)
        log.debug("Upcoming traffic light is: '{}'.".format(traffic_light_state))
        current_obs.traffic_light_state = traffic_light_state
        current_obs.traffic_light_data = traffic_light_data
        extra_metrics['red_light_violations'] = waypointer.red_light_violations
        extra_metrics['intersection_count'] = waypointer.intersection_count

        if trackersconf.reset_trackers_on_red and traffic_light_state == 'RED':
            log.debug("Resetting trackers because we observed a RED light")
            stop_tracker.reset()
            stuck_tracker.reset()

        # Prune near and far waypoints, possibly perturb.
        if mainconf.pilot != 'auto':
            # pdb.set_trace()
            waypoints_control, waypoint_metadata = waypointer.prepare_waypoints_for_control(
                waypoints_local, current_obs, dimconf=dimconf)
            npu.lock_nd(waypoints_control)
        else:
            waypoints_control = None
            waypoint_metadata = {}

        # Reset plotter.
        plottable_manager.reset()

        if mainconf.pilot != 'auto':
            # Check if we have enough waypoints to continue, and if we've stopped.
            episode_state, summary = agent_util.decide_episode_state(
                waypointer, waypoints, stop_tracker, start_obs, current_obs,
                goal_distance_threshold=expconf.goal_distance_threshold)
            if episode_state == agent_util.EpisodeState.SUCCESS:
                metrics.conclude_episode(success=True, summary=summary)
                break
            elif episode_state == agent_util.EpisodeState.FAILURE:
                metrics.conclude_episode(success=False, summary=summary)
                break
            elif episode_state == agent_util.EpisodeState.INPROGRESS:
                pass
            else:
                raise ValueError('unknown episode state')

        # ------------------
        # Update the metrics
        # ------------------
        metrics.update(measurement, extra_metrics)
        if have_control:
            metrics.update_passenger_comfort(current_obs)

        # Periodically print.
        if episode_params.episode > 0 and frame % 1000 == 0:
            log.info("Intermediate print... current overall metrics: {}".format(metrics))

        # Check for collision.
        if check_for_collision(
                measurement, sum_collision_impulse, episode_params, metrics, frame,
                player_transform=current_obs.inv_tform_t,
                allow_vehicles_to_hit_us_from_behind=expconf.allow_vehicles_to_hit_us_from_behind):
            # Create control anyway in case the server is looking for it?
            control = autopilot_controller.noisy_autopilot(
                measurement,
                replan_index=dimconf.replan_period,
                replan_period=dimconf.replan_period,
                cfg=dataconf)
            # Send the control.
            client.send_control(control)
            # Quit the episode.
            break

        # Update current total collision impulse.
        sum_collision_impulse = measurement.player_measurements.collision_other

        # Get the feed_dict for this frame to build features / plottables.
        # TODO should be T_past, not 3.
        if frame > 3:
            fd = streaming_loader.populate_phi_feeds(
                phi=phi,
                sensor_data=sensor_data,
                measurement_buffer=measurement_buffer,
                with_bev=True,
                with_lights=True,
                observations=current_obs,
                frame=frame)
            if have_control and dataconf.save_data:
                fd_previous = streaming_loader.populate_expert_feeds(current_obs, future, frame)
                if frame % dataconf.save_period_frames == 0:
                    fn = "{}/feed_{:08d}.json".format(dim_feeds_dir, frame)
                    log.debug("Saving feed to '{}'".format(fn))
                    preproc.dict_to_json(fd_previous, fn)
                streaming_loader.prune_old(frame)

        # If we have a non-autopilot controller and enough frames have elapsed.
        if have_control and mainconf.pilot not in ('auto', 'user', 'zero'):
            prune_nearby = current_obs.is_turning and waypointerconf.drop_near_on_turn
            if stuck or prune_nearby:
                log.warning("Vehicle is stuck or turning, preparing to adjust goal likelihood")
                waypoints_control = waypointer.get_unsticking_waypoints(
                    waypoints_control, midlow_controller, current_obs)
            if mainconf.save_dim_feeds:
                fn = "{}/feed_{:08d}.json".format(dim_feeds_dir, frame)
                preproc.dict_to_json(fd, fn)
            if mainconf.override_pilot:
                log.debug("Building autopilot control")
                # If not in controlled mode, or not in controlled mode yet, use autopilot.
                control = autopilot_controller.noisy_autopilot(
                    measurement,
                    replan_index=dimconf.replan_period,
                    replan_period=dimconf.replan_period,
                    cfg=dataconf)
                approach_plotting_data = None
                have_control = False
            else:
                # Generate the control for the pilot.
                control, approach_plotting_data = generate_control(
                    mainconf.pilot,
                    midlow_controller=midlow_controller,
                    car_pid_controllers=car_pid_controllers,
                    model=model,
                    fd=fd,
                    measurement=measurement,
                    waypoints_local=waypoints_control,
                    transform_now=transform_now,
                    waypointer=waypointer,
                    current_obs=current_obs,
                    sensor_data=sensor_data,
                    waypoint_metadata=waypoint_metadata)
        else:
            log.debug("Building autopilot control")
            # If not in controlled mode, or not in controlled mode yet, use autopilot.
            control = autopilot_controller.noisy_autopilot(
                measurement,
                replan_index=dimconf.replan_period,
                replan_period=dimconf.replan_period,
                cfg=dataconf)
            approach_plotting_data = None

        log.debug("Control: {}".format(control))
        client.send_control(control)
        waypointerconf.have_planned = have_control

        if plotconf.plot and frame > 3:
            plot_data = cplot.GenericPerFramePlottingData(
                pilot=mainconf.pilot,
                hires=plotconf.hires_plot,
                measurement=measurement,
                feed_dict=fd,
                waypoints_local=waypoints_local,
                waypoints_control=waypoints_control,
                waypoint_metadata=waypoint_metadata,
                transform_now=transform_now,
                current_obs=current_obs,
                control=control)
            if have_control:
                cplot.update_pilots_plot(mainconf.pilot, plottable_manager, plot_data, approach_plotting_data)
            else:
                cplot.update_pilots_plot('auto', plottable_manager, plot_data, approach_plotting_data)
            # plottable_manager.update_from_observation(current_obs, plot_data=plot_data, control=control)
            log.debug("Plotting data")
            cplot.online_plot(
                model=model,
                measurements=measurement,
                sensor_data=sensor_data,
                overhead_lidar=fd[phi.overhead_features][0, ..., 0],
                overhead_semantic=None,
                plottables=plottable_manager.plottables,
                fd=fd,
                axes=axes,
                fig=fig,
                plot_state=plot_state)
            if plotconf.save_plots:
                log.debug("Saving plot")
                # pdb.set_trace()
                # fig.savefig('{}/plot_{:08d}.jpg'.format(plot_dir, frame), dpi=180)
                # Get the extent of the specific axes we'll save.
                if plotconf.remove_second_row:
                    joint_extent = cplot.full_extent([axes[1, 0], axes[0, 1]])
                    extent0 = joint_extent.transformed(fig.dpi_scale_trans.inverted())
                    # fig.savefig('{}/plot_{:08d}.png'.format(plot_dir, frame), bbox_inches=extent0, dpi=cfg.dpi*2)
                    fig.savefig('{}/plot_{:08d}.{}'.format(plot_dir, frame, plotconf.imgfmt),
                                bbox_inches=extent0, dpi=plotconf.dpi * 2)
                else:
                    # fig.savefig('{}/plot_{:08d}.png'.format(plot_dir, frame), dpi=plotconf.dpi)
                    fig.savefig('{}/plot_{:08d}.{}'.format(plot_dir, frame, plotconf.imgfmt),
                                dpi=plotconf.dpi)
        else:
            pass

        if frame % episode_params.snapshot_frequency == 0:
            log.info("Saving visual snapshot of environment. Frame={}".format(frame))
            # Serialize the sensor data to disk.
            for name, datum in sensor_data.items():
                if name == 'CameraRGB':
                    filename = os.path.join(directory, '{}_{:06d}'.format(name, frame))
                    datum.save_to_disk(filename)
def __init__(self, measurements, t_index, radius, A=None, agent_id_ordering=None,
             empty_val=200, waypointer=None, frame=None):
    """This object holds some metadata and history about observations. It's used as a
    preprocessing object to pass around outside of the model.

    :param measurements: list of measurements
    :param t_index: index of 'now' within the measurement sequence
    :param radius: radius inside which to include other agents
    :param A: total number of agents (player + others) to represent
    :param agent_id_ordering: optional, None or dict str id : index of prediction.
    :param empty_val: value used to fill the local positions of missing agents
    :param waypointer: optional waypointer used to record traffic light state and goal
    :param frame: current frame index
    """
    self.t = t_index
    self.frame = frame

    # The sequence of player measurements.
    self.player_measurement_traj = [_.player_measurements for _ in measurements]
    # The transform at 'now'.
    self.tform_t = transform.Transform(self.player_measurement_traj[t_index].transform)
    # The inverse transform at 'now'.
    self.inv_tform_t = self.tform_t.inverse()
    # The sequence of player forward speeds.
    self.player_forward_speeds = np.asarray(
        [_.forward_speed for _ in self.player_measurement_traj])
    # The sequence of player accelerations. These are in world coords.
    self.accels_world = np.asarray(
        [vector3_to_np(_.acceleration) for _ in self.player_measurement_traj])
    # Apply transform, undo translation.
    self.accels_local = (self.inv_tform_t.transform_points(self.accels_world) -
                         self.inv_tform_t.matrix[:3, -1])
    # (T,3) sequence of player positions (CARLA world coords).
    self.player_positions_world = np.asarray(
        [vector3_to_np(_.transform.location) for _ in self.player_measurement_traj])
    # (T,3) sequence of player positions (CARLA local coords / R2P2 world frames of
    # the transform at 'now').
    self.player_positions_local = self.inv_tform_t.transform_points(self.player_positions_world)
    # Global player rotations.
    self.rotations = np.asarray(
        [transforms_rotation_to_np(_.transform) for _ in self.player_measurement_traj])

    # Get the current measurement.
    self.measurement_now = measurements[self.t]

    if agent_id_ordering is None:
        # Get agents currently close to us.
        self.all_nearby_agent_ids = get_nearby_agent_ids(self.measurement_now, radius=radius)
        self.nearby_agent_ids = self.all_nearby_agent_ids[:A - 1]
        # Extract just the aid from (dist, aid) pairs.
        self.nearby_agent_ids_flat = [_[1] for _ in self.nearby_agent_ids]
        # Store our ordering of the agents.
        self.agent_id_ordering = dict(
            tuple(reversed(_)) for _ in enumerate(self.nearby_agent_ids_flat))
    else:
        # Expand the radius so we make sure to get the desired agents.
        self.all_nearby_agent_ids = get_nearby_agent_ids(
            self.measurement_now, radius=9999999 * radius)
        self.nearby_agent_ids = [None] * len(agent_id_ordering)
        for dist, aid in self.all_nearby_agent_ids:
            if aid in agent_id_ordering:
                # Put the agent id in the provided index.
                self.nearby_agent_ids[agent_id_ordering[aid]] = (dist, aid)

    # Extract all agent transforms. TODO configurable people.
    self.npc_tforms_unfilt = extract_nonplayer_transforms_list(measurements, False)

    # Collate the transforms of nearby agents.
    self.npc_tforms_nearby = collections.OrderedDict()
    self.npc_trajectories = []
    for dist, nid in self.nearby_agent_ids:
        self.npc_tforms_nearby[nid] = self.npc_tforms_unfilt[nid]
        self.npc_trajectories.append(
            [transform_to_loc(_) for _ in self.npc_tforms_unfilt[nid]])

    self.all_npc_trajectories = []
    for dist, nid in self.all_nearby_agent_ids:
        self.all_npc_trajectories.append(
            [transform_to_loc(_) for _ in self.npc_tforms_unfilt[nid]])

    history_shapes = [np.asarray(_).shape for _ in self.all_npc_trajectories]
    different_history_shapes = len(set(history_shapes)) > 1
    if different_history_shapes:
        log.error("Not all agents have the same history length! "
                  "Pruning the shortest agents until there's just one shape")
        while len(set(history_shapes)) > 1:
            history_shapes = [np.asarray(_).shape for _ in self.all_npc_trajectories]
            smallest_agent_index = np.argmin(history_shapes, axis=0)[0]
            self.all_npc_trajectories.pop(smallest_agent_index)

    # The other agent positions in world frame.
    self.agent_positions_world = np.asarray(self.npc_trajectories)
    # N.B. This reshape will fail if histories are different sizes.
    self.unfilt_agent_positions_world = np.asarray(self.all_npc_trajectories)
    self.n_missing = max(A - 1 - self.agent_positions_world.shape[0], 0)

    # Length-A list, indicating whether we have each agent.
    self.agent_indicators = [1] + [1] * len(self.npc_trajectories) + [0] * self.n_missing

    if self.n_missing == 0:
        pass
    elif self.n_missing > 0:
        # (3,)
        faraway = self.player_positions_world[-1] + 500
        faraway_tile = np.tile(faraway[None, None], (self.n_missing, len(measurements), 1))
        if self.n_missing == 0:
            pass
        elif self.n_missing == A - 1:
            self.agent_positions_world = faraway_tile
        else:
            self.agent_positions_world = np.concatenate(
                (self.agent_positions_world, faraway_tile), axis=0)

    self.player_position_now_world = self.player_positions_world[self.t]
    oshape = self.agent_positions_world.shape
    uoshape = self.unfilt_agent_positions_world.shape
    apw_pack = np.reshape(self.agent_positions_world, (-1, 3))
    uapw_pack = np.reshape(self.unfilt_agent_positions_world, (-1, 3))

    # Store all agent current positions in agent frame.
    self.unfilt_agent_positions_local = np.reshape(
        self.inv_tform_t.transform_points(uapw_pack), uoshape)

    if A == 1:
        self.agent_positions_local = np.array([])
        self.agent_positions_now_world = np.array([])
        self.all_positions_now_world = self.player_position_now_world[None]
    else:
        self.agent_positions_local = np.reshape(
            self.inv_tform_t.transform_points(apw_pack), oshape)
        self.agent_positions_now_world = self.agent_positions_world[:, self.t]
        self.all_positions_now_world = np.concatenate(
            (self.player_position_now_world[None], self.agent_positions_now_world), axis=0)
    assert (self.all_positions_now_world.shape == (A, 3))

    if self.n_missing > 0:
        log.warning("Overwriting missing agent local positions with empty_val={}!".format(empty_val))
        self.agent_positions_local[-self.n_missing:] = empty_val

    self.yaws_world = [self.tform_t.yaw]
    # Extract the yaws for the agents at t=now.
    for tforms in self.npc_tforms_nearby.values():
        self.yaws_world.append(tforms[self.t].yaw)
    self.yaws_world.extend([0] * self.n_missing)

    assert (self.agent_positions_world.shape[0] == A - 1)
    assert (self.agent_positions_local.shape[0] == A - 1)
    assert (len(self.yaws_world) == A)
    assert (len(self.agent_indicators) == A)

    if waypointer is not None:
        # Get the light state from the most recent measurement.
        self.traffic_light_state, self.traffic_light_data = waypointer.get_upcoming_traffic_light(
            measurements[-1], sensor_data=None)
    else:
        log.warning("Not recording traffic light state in observation!")

    # Guard against a missing waypointer so the attribute access cannot fail.
    self.player_destination_world = (
        waypointer.goal_position if waypointer is not None else None)
    if self.player_destination_world is not None:
        self.player_destination_local = self.inv_tform_t.transform_points(
            self.player_destination_world[None])[0]
    else:
        self.player_destination_local = None

    self.yaws_local = [yaw_world - self.yaws_world[0] for yaw_world in self.yaws_world]
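# A minimal usage sketch of PlayerObservations, mirroring the call in run_episode above.
# The buffer length and A value are illustrative, not prescriptive:
#
#   measurement_buffer = collections.deque(maxlen=10)
#   # ... append one CARLA measurement per simulation frame ...
#   current_obs = PlayerObservations(
#       measurement_buffer, t_index=-1, radius=200, A=5,
#       waypointer=waypointer, frame=frame)
#   # Player and nearby-agent position histories in the frame of 'now':
#   # current_obs.player_positions_local, current_obs.agent_positions_local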
def get_rectified_player_transform(player_measurements):
    """Compose the rectifying (pitch/roll-removing) transform with the player transform."""
    return (get_rectifying_player_transform(player_measurements) *
            transform.Transform(player_measurements.transform))