def __init__(self, location: Location = None, rotation: Rotation = None, matrix=None):
    """Initialize a transform either from a 4x4 transformation matrix or
    from a (location, rotation) pair.

    Args:
        location: Translation component; used only when ``matrix`` is None.
        rotation: Rotation component; used only when ``matrix`` is None.
        matrix: Optional 4x4 homogeneous transform. When given, location,
            forward vector and Euler angles are extracted from it and the
            ``location``/``rotation`` arguments are ignored.
    """
    if matrix is not None:
        self.matrix = matrix
        # Translation lives in the last column of the homogeneous matrix.
        self.location = Location(matrix[0, 3], matrix[1, 3], matrix[2, 3])

        # Forward vector is retrieved from the matrix.
        self.forward_vector = \
            Vector3D(self.matrix[0, 0], self.matrix[1, 0], self.matrix[2, 0])
        # np.clip guards asin/acos against values slightly outside [-1, 1]
        # caused by floating-point error.
        pitch_r = math.asin(np.clip(self.forward_vector.z, -1, 1))
        # NOTE(review): math.acos returns values in [0, pi] only, so the
        # sign of yaw is lost here (e.g. yaw = -90 deg is reported as
        # +90 deg). atan2(m[1,0], m[0,0]) would preserve the sign — confirm
        # against the matrix convention used by _create_matrix.
        yaw_r = math.acos(
            np.clip(self.forward_vector.x / math.cos(pitch_r), -1, 1))
        # NOTE(review): when pitch = +/-90 deg, cos(pitch_r) == 0 and both
        # divisions above raise ZeroDivisionError (gimbal lock) — verify
        # callers never hit that configuration.
        roll_r = math.asin(
            np.clip(matrix[2, 1] / (-1 * math.cos(pitch_r)), -1, 1))
        self.rotation = Rotation(math.degrees(pitch_r), math.degrees(yaw_r),
                                 math.degrees(roll_r))
    else:
        self.location, self.rotation = location, rotation
        # Build the matrix from the given pose instead.
        self.matrix = Transform._create_matrix(self.location, self.rotation)

        # Forward vector is retrieved from the matrix.
        self.forward_vector = \
            Vector3D(self.matrix[0, 0], self.matrix[1, 0], self.matrix[2, 0])
def process_segmentation_images(msg, camera_setup, ego_vehicle, speed, csv, dump=False):
    """Handle one segmentation camera message: log mIoU, optionally dump the
    frame to disk, then advance the ego vehicle and tick the simulator.

    Exits the process (status 0) once the ego vehicle is within 5 meters of
    VEHICLE_DESTINATION, after stopping the vehicle and running cleanup.
    """
    print("Received a message for the time: {}".format(msg.timestamp))

    # Close enough to the goal? Stop the vehicle, clean up, and report success.
    distance_left = ego_vehicle.get_location().distance(VEHICLE_DESTINATION)
    if distance_left <= 5:
        ego_vehicle.set_velocity(Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Build the segmented frame and record its mIoU for this timestamp.
    segmented_frame = SegmentedFrame.from_simulator_image(msg, camera_setup)
    compute_and_log_miou(segmented_frame, msg.timestamp, csv)

    # Persist the frame for offline inspection when requested.
    if dump:
        segmented_frame.save(int(msg.timestamp * 1000), './_out/', 'seg')

    # Keep driving at the requested speed and step the simulation.
    ego_vehicle.set_velocity(Vector3D(x=-speed))
    ego_vehicle.get_world().tick()
def add_agent(self, agent: ip.Agent, rolename: str = None, blueprint: carla.ActorBlueprint = None):
    """ Add a vehicle to the simulation. Defaults to an Audi A2 for blueprints
    if not explicitly given.

    Args:
        agent: Agent to add.
        rolename: Unique name for the actor to spawn.
        blueprint: Optional blueprint defining the properties of the actor.

    Returns:
        The newly added actor.
    """
    # Fall back to the default vehicle blueprint when none was supplied.
    if blueprint is None:
        blueprint = self.__world.get_blueprint_library().find('vehicle.audi.a2')
    if rolename is not None:
        blueprint.set_attribute('role_name', rolename)

    # Map the agent's state into the simulator frame (y is negated and the
    # heading sign flipped — the two frames differ in handedness).
    initial_state = agent.state
    spawn_location = Location(x=initial_state.position[0],
                              y=-initial_state.position[1],
                              z=0.1)
    spawn_rotation = Rotation(yaw=np.rad2deg(-initial_state.heading))
    actor = self.__world.spawn_actor(blueprint,
                                     Transform(spawn_location, spawn_rotation))
    actor.set_target_velocity(
        Vector3D(initial_state.velocity[0], -initial_state.velocity[1], 0.))

    # Register the wrapped agent so it can be looked up by id later.
    wrapper = ip.carla.CarlaAgentWrapper(agent, actor)
    self.agents[wrapper.agent_id] = wrapper
def as_simulator_vector(self):
    """Retrieves the 3D vector as an instance of simulator 3D vector.

    Returns:
        An instance of the simulator class representing the 3D vector.
    """
    # Imported lazily so the module loads even without the simulator package.
    from carla import Vector3D
    x, y, z = self.x, self.y, self.z
    return Vector3D(x, y, z)
def twist_command_updated(self, twist):
    """
    Set angular/linear velocity (this does not respect vehicle dynamics)
    """
    # A manual control override takes precedence over twist commands.
    if self.vehicle_control_override:
        return

    # Angular part: only the z (yaw) rate is forwarded, converted to degrees.
    spin = Vector3D()
    spin.z = math.degrees(twist.angular.z)

    # Linear part: rotate the commanded velocity from the vehicle frame
    # into the world frame using the actor's current orientation.
    rot_matrix = transforms.carla_rotation_to_numpy_rotation_matrix(
        self.carla_actor.get_transform().rotation)
    commanded = numpy.array([twist.linear.x, twist.linear.y, twist.linear.z])
    world_vec = rot_matrix.dot(commanded)

    velocity = Vector3D()
    velocity.x = world_vec[0]
    # y is negated — presumably the ROS/simulator frames differ in
    # handedness; verify against the transforms helper.
    velocity.y = -world_vec[1]
    velocity.z = world_vec[2]

    rospy.logdebug("Set velocity linear: {}, angular: {}".format(
        velocity, spin))
    self.carla_actor.set_velocity(velocity)
    self.carla_actor.set_angular_velocity(spin)
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    """Handle one depth camera message: detect pedestrians via depth +
    semantic frames, log mAP, optionally visualize, then advance the ego
    vehicle and tick the simulator.

    Exits the process (status 0) once the ego vehicle is within 5 meters of
    VEHICLE_DESTINATION, after stopping the vehicle and running cleanup.

    Args:
        msg: Depth image message from the simulator.
        depth_camera_setup: Camera setup describing the depth camera.
        ego_vehicle: The ego vehicle actor.
        speed: Forward speed to keep driving at.
        csv: CSV sink for the mAP log.
        surface: pygame surface used when visualizing.
        visualize: Whether to draw the RGB image and bounding boxes.
    """
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are in distance to the destination, stop and exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)
    # Get the semantic image corresponding to the given depth image timestamp.
    semantic_image = retrieve_semantic_image(msg.timestamp)

    # BUG FIX: depth_frame was previously referenced here before it was
    # assigned (NameError); it must be constructed before its camera_setup
    # can be handed to the segmented frame.
    depth_frame = DepthFrame.from_simulator_frame(msg, depth_camera_setup)
    semantic_frame = SegmentedFrame.from_simulator_image(
        semantic_image, depth_frame.camera_setup)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Transform people into obstacles relative to the current frame.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        # Black is treated as transparent when blitting the boxes.
        bb_surface.set_colorkey((0, 0, 0))

    vehicle_transform = pylot.utils.Transform.from_simulator_transform(
        ego_vehicle.get_transform())
    # Transform the static camera setup with respect to the location of the
    # vehicle in the world.
    depth_frame.camera_setup.set_transform(
        vehicle_transform * depth_frame.camera_setup.transform)

    detected_people = []
    for person in ego_vehicle.get_world().get_actors().filter('walker.*'):
        obstacle = Obstacle.from_simulator_actor(person)
        # Skip pedestrians that are too far away to be detectable.
        if obstacle._distance(vehicle_transform) > 125:
            bbox = None
        else:
            bbox = obstacle.populate_bounding_box_2D(depth_frame,
                                                     semantic_frame.frame)
        if bbox is not None:
            detected_people.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # We have drawn all the bounding boxes on the bb_surface, now put it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute the mAP.
    print("We detected a total of {} people.".format(len(detected_people)))
    compute_and_log_map(detected_people, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(Vector3D(x=-speed))
    ego_vehicle.get_world().tick()