Example #1
    def __init__(self,
                 location: Location = None,
                 rotation: Rotation = None,
                 matrix=None):
        if matrix is not None:
            self.matrix = matrix
            self.location = Location(matrix[0, 3], matrix[1, 3], matrix[2, 3])

            # Forward vector is retrieved from the matrix.
            self.forward_vector = \
                Vector3D(self.matrix[0, 0], self.matrix[1, 0],
                         self.matrix[2, 0])
            pitch_r = math.asin(np.clip(self.forward_vector.z, -1, 1))
            yaw_r = math.acos(
                np.clip(self.forward_vector.x / math.cos(pitch_r), -1, 1))
            roll_r = math.asin(
                np.clip(matrix[2, 1] / (-1 * math.cos(pitch_r)), -1, 1))
            self.rotation = Rotation(math.degrees(pitch_r),
                                     math.degrees(yaw_r), math.degrees(roll_r))
        else:
            self.location, self.rotation = location, rotation
            self.matrix = Transform._create_matrix(self.location,
                                                   self.rotation)

            # Forward vector is retrieved from the matrix.
            self.forward_vector = \
                Vector3D(self.matrix[0, 0], self.matrix[1, 0],
                         self.matrix[2, 0])
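A minimal round-trip sketch of the constructor above (assuming the pylot-style Transform, Location, and Rotation classes from this example are importable): a transform built from a location and rotation, then rebuilt from its own 4x4 matrix, recovers the same pose up to floating-point error. Note the yaw is recovered with acos, so only yaws in [0, 180] degrees round-trip exactly.

t1 = Transform(Location(1.0, 2.0, 0.5),
               Rotation(10.0, 30.0, 0.0))  # (pitch, yaw, roll) in degrees

# Rebuild from the matrix; translation and Euler angles should match t1.
t2 = Transform(matrix=t1.matrix)
print(t1.location.x, t2.location.x)      # 1.0 1.0
print(t1.rotation.yaw, t2.rotation.yaw)  # ~30.0 ~30.0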
Example #2
def main(argv):
    global pixels_to_check
    target_vehicle_transform = Transform(Location(242, 131.24, 0))
    sensor_transform = Transform(Location(237.7, 132.24, 1.3))
    pixels_to_check = [(200, 370)]
    run_scenario(target_vehicle_transform, sensor_transform)

    target_vehicle_transform = Transform(Location(2, 12, 0))
    sensor_transform = Transform(Location(0, 18, 1.4),
                                 Rotation(pitch=0, yaw=-90, roll=0))
    pixels_to_check = [(500, 400), (600, 400), (500, 350), (600, 350)]
    run_scenario(target_vehicle_transform, sensor_transform)
Example #3
 def use_sample(self, sample):
     print('USE_SAMPLE Sample:', sample)
     weather = carla.WeatherParameters(**{k: sample.params._asdict()[k] for k in WEATHER_PARAMS})
     self.world.world.set_weather(weather)
     for obj in sample.objects:
         spawn = Transform(self.snap_to_ground(Location(x=obj.position[0],
                                                        y=-obj.position[1], z=1)),
                           Rotation(yaw=-obj.heading * 180 / math.pi - 90))
         attrs = dict()
         if 'color' in obj._fields:
             color = str(int(obj.color.r * 255)) + ',' \
                 + str(int(obj.color.g * 255)) + ',' + str(int(obj.color.b * 255))
             attrs['color'] = color
         if 'blueprint' in obj._fields:
             attrs['blueprint_filter'] = obj.blueprint
         agent = BrakeAgent
         if 'agent' in obj._fields:
             agent = AGENTS[obj.agent]
         if obj.type in ['Vehicle', 'Car', 'Truck', 'Bicycle', 'Motorcycle']:
             self.world.add_vehicle(agent,
                                    spawn=spawn,
                                    has_collision_sensor=False,
                                    has_lane_sensor=False,
                                    ego=obj is sample.objects[0],
                                    **attrs)
         elif obj.type == 'Pedestrian':
             self.world.add_pedestrian(spawn=spawn, **attrs)
         elif obj.type in ['Prop', 'Trash', 'Cone']:
             self.world.add_prop(spawn=spawn, **attrs)
         else:
             print('Unsupported object type:', obj.type)
Example #4
 def spawn_vehicles(self):
     self.bp_lib = self.world.get_blueprint_library()
     ego_bp = self.bp_lib.filter('vehicle.tesla.model3')[0]
     spawn_point = Transform(Location(x=392.1, y=10.0, z=0.02), Rotation(yaw=89.6))
     self.ego_vehicle = self.world.spawn_actor(ego_bp, spawn_point)
     self.setup_sensors(self.ego_vehicle)
     self.actor.append(self.ego_vehicle)
     self.ego_vehicle.get_world()
     leading_bp = self.bp_lib.filter('vehicle.audi.a2')[0]
     #leading_bp = self.bp_lib.filter('vehicle.audi.etron')[0]
     #leading_bp = self.bp_lib.filter('vehicle.audi.tt')[0]
     #leading_bp = random.choice(self.bp_lib.filter('vehicle.audi.*'))
     spawn_point_leading = Transform(Location(x=392.1, y=320.0, z=0.0), Rotation(yaw=90))
     self.leading_vehicle = self.world.try_spawn_actor(leading_bp, spawn_point_leading)
     self.leading_vehicle.get_world()
     self.actor.append(self.leading_vehicle)
Example #5
    def add_agent(self,
                  agent: ip.Agent,
                  rolename: str = None,
                  blueprint: carla.ActorBlueprint = None):
        """ Add a vehicle to the simulation. Defaults to an Audi A2 for blueprints if not explicitly given.

        Args:
            agent: Agent to add.
            rolename: Unique name for the actor to spawn.
            blueprint: Optional blueprint defining the properties of the actor.

        Returns:
            The newly added actor.
        """
        if blueprint is None:
            blueprint_library = self.__world.get_blueprint_library()
            blueprint = blueprint_library.find('vehicle.audi.a2')

        if rolename is not None:
            blueprint.set_attribute('role_name', rolename)

        state = agent.state
        yaw = np.rad2deg(-state.heading)
        transform = Transform(Location(x=state.position[0], y=-state.position[1], z=0.1), Rotation(yaw=yaw))
        actor = self.__world.spawn_actor(blueprint, transform)
        actor.set_target_velocity(Vector3D(state.velocity[0], -state.velocity[1], 0.))

        carla_agent = ip.carla.CarlaAgentWrapper(agent, actor)
        self.agents[carla_agent.agent_id] = carla_agent
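The sign flips above come from CARLA's left-handed coordinate frame: y and yaw are mirrored relative to a conventional right-handed map frame, which is why the position, velocity, and heading are all negated on y. A minimal self-contained sketch of the same conversion (the helper name and the example pose are illustrative assumptions):

import numpy as np
from carla import Location, Rotation, Transform


def pose_to_carla_transform(x, y, heading_rad, z=0.1):
    # heading_rad is assumed counter-clockwise in radians; CARLA's yaw is
    # clockwise in degrees and its y axis points the opposite way.
    return Transform(Location(x=x, y=-y, z=z),
                     Rotation(yaw=np.rad2deg(-heading_rad)))


# Example: a pose 10 m east, 5 m north, facing 45 degrees counter-clockwise.
print(pose_to_carla_transform(10.0, 5.0, np.pi / 4))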
Example #6
def main(argv):
    client, world = get_world(FLAGS.simulator_host, FLAGS.simulator_port,
                              FLAGS.simulator_timeout)

    # Replayer time factor is only available in > 0.9.5.
    client.set_replayer_time_factor(0.1)
    print(
        client.replay_file(FLAGS.replay_file, FLAGS.replay_start_time,
                           FLAGS.replay_duration, FLAGS.replay_id))
    # Sleep a bit to allow the server to start the replay.
    time.sleep(1)
    vehicle = world.get_actors().find(FLAGS.replay_id)
    if vehicle is None:
        raise ValueError("There was an issue finding the vehicle.")

    # Install the camera.
    camera_blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
    camera_blueprint.set_attribute('image_size_x',
                                   str(FLAGS.camera_image_width))
    camera_blueprint.set_attribute('image_size_y',
                                   str(FLAGS.camera_image_height))

    transform = Transform(Location(2.0, 0.0, 1.4),
                          Rotation(pitch=0, yaw=0, roll=0))

    camera = world.spawn_actor(camera_blueprint, transform, attach_to=vehicle)

    # Register the callback on the camera.
    camera.listen(process_images)

    time.sleep(20)
Example #7
 def reset(self, attempt=0):
     logger.info('Resetting ...')
     self._in_carla_autopilot = False
     self._in_reverse = False
     self._destroy()
     #
     blueprint_library = self._world.get_blueprint_library()
     vehicle_bp = blueprint_library.find('vehicle.tesla.model3')
     spawn_points = self._world.get_map().get_spawn_points()
     spawn_id = self._spawn_preferred_id if attempt == 0 and self._spawn_preferred_id >= 0 else np.random.randint(
         len(spawn_points))
     spawn_point = spawn_points[spawn_id]
     try:
         self._actor = self._world.spawn_actor(vehicle_bp, spawn_point)
     except RuntimeError as e:
         if attempt < 4:
             self.reset(attempt + 1)
         else:
             raise e
     logger.info("Spawn point is '{}' with id {}.".format(
         spawn_point, spawn_id))
      # Attach the cameras - defaults at https://carla.readthedocs.io/en/latest/cameras_and_sensors/.
     # Provide the position of the sensor relative to the vehicle.
     # Tell the world to spawn the sensor, don't forget to attach it to your vehicle actor.
     camera_front = self._world.spawn_actor(self._create_camera(),
                                            Transform(
                                                Location(x=1.25, z=1.4)),
                                            attach_to=self._actor)
     camera_rear = self._world.spawn_actor(self._create_camera(),
                                           Transform(
                                               Location(x=-1.0, z=2.0),
                                               Rotation(pitch=180,
                                                        roll=180)),
                                           attach_to=self._actor)
     self._sensors.append(camera_front)
     self._sensors.append(camera_rear)
     # Subscribe to the sensor stream by providing a callback function,
     # this function is called each time a new image is generated by the sensor.
     camera_front.listen(lambda data: self._on_camera(data, camera=0))
     camera_rear.listen(lambda data: self._on_camera(data, camera=1))
     self._traffic_manager.ignore_lights_percentage(self._actor, 100.)
     self._set_weather(use_preset=True)
      # The tracker needs to be recreated because it depends on the actor.
     if self._geo_tracker is not None:
         self._geo_tracker.quit()
     self._geo_tracker = GeoTrackerThread(self._world, self._actor)
     self._geo_tracker.start()
Example #8
    def as_simulator_location(self):
        """Retrieves the location as a simulator location instance.

        Returns:
            An instance of the simulator class representing the location.
        """
        from carla import Location
        return Location(self.x, self.y, self.z)
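A short usage sketch (a connected `world` handle is assumed to exist): the pylot-style location converts to a carla.Location so it can be passed to simulator APIs such as Map.get_waypoint.

loc = Location(12.0, -3.5, 0.0)          # pylot-style location
sim_loc = loc.as_simulator_location()    # carla.Location
waypoint = world.get_map().get_waypoint(sim_loc)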
Example #9
def main(args):
    """ The main function that orchestrates the setup of the world, connection
    to the scenario and the subsequent logging of the frames for computation
    of the mIoU.

    Args:
        args: The argparse.Namespace instance that is retrieved from parsing
        the arguments.
    """

    # Setup the world.
    world = setup_world(args.host, args.port, args.delta)

    # Retrieve the ego-vehicle from the simulation.
    ego_vehicle = None
    while ego_vehicle is None:
        print("Waiting for the scenario to be ready ...")
        sleep(1)
        ego_vehicle = retrieve_actor(world, 'vehicle.*', 'hero')
        world.tick()

    # Connect the segmentation camera to the vehicle.
    segmentation_camera_transform = Transform(location=Location(1.0, 0.0, 1.8),
                                              rotation=Rotation(0, 0, 0))
    segmentation_camera, camera_setup = spawn_camera(
        'sensor.camera.semantic_segmentation', segmentation_camera_transform,
        ego_vehicle, *args.res.split('x'))

    # Open the CSV file for writing.
    csv_file = open(args.output, 'w')
    csv_writer = csv.writer(csv_file)

    # Create the cleanup function.
    global CLEANUP_FUNCTION
    CLEANUP_FUNCTION = functools.partial(cleanup_function,
                                         world=world,
                                         cameras=[segmentation_camera],
                                         csv_file=csv_file)

    # Register a callback function with the camera.
    segmentation_camera.listen(
        functools.partial(process_segmentation_images,
                          camera_setup=camera_setup,
                          ego_vehicle=ego_vehicle,
                          speed=args.speed,
                          csv=csv_writer,
                          dump=args.dump))

    try:
        # To keep the thread alive so that the images can be processed.
        while True:
            pass
    except KeyboardInterrupt:
        CLEANUP_FUNCTION()
        sys.exit(0)
Example #10
    def as_simulator_transform(self):
        """Converts the transform to a simulator transform.

        Returns:
            An instance of the simulator class representing the Transform.
        """
        from carla import Location, Rotation, Transform
        return Transform(
            Location(self.location.x, self.location.y, self.location.z),
            Rotation(pitch=self.rotation.pitch,
                     yaw=self.rotation.yaw,
                     roll=self.rotation.roll))
Example #11
 def restart(self):
     # Keep same camera config if the camera manager exists.
     cam_index = self.camera_manager.index if self.camera_manager is not None else 0
     cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
     # Get a random blueprint.
     # blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
     blueprint = random.choice(self.world.get_blueprint_library().filter('toyota'))
     blueprint.set_attribute('role_name', self.actor_role_name)
     if blueprint.has_attribute('color'):
         # color = random.choice(blueprint.get_attribute('color').recommended_values)
         # color = "255,255,255"
         color = self.color
         blueprint.set_attribute('color', color)
     # Spawn the player.
     if self.player is not None:
          print("the player is not None; respawning at its current transform")
         spawn_point = self.player.get_transform()
         spawn_point.location.z += 2.0
         spawn_point.rotation.roll = 0.0
         spawn_point.rotation.pitch = 0.0
         self.destroy()
         self.player = self.world.try_spawn_actor(blueprint, spawn_point)
     while self.player is None:
         # def ego_spawn_point(points):
         #     for pt in points:
         #         if pt.location.x < 69 and pt.location.x > 68 and pt.location.y > 6 and pt.location.y < 7:
         #             return pt
         # def hero_spawn_point(points):
         #     for pt in points:
         #         if pt.location.x < 69 and pt.location.x > 68 and pt.location.y > 7.5 and pt.location.y < 10:
         #             return pt
         # spawn_points = self.map.get_spawn_points()
         # spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
         # if self.actor_role_name == "hero":
         #     spawn_point = hero_spawn_point(spawn_points)
         # elif self.actor_role_name == "ego_vehicle":
         #     spawn_point = ego_spawn_point(spawn_points)
         # print(" ========================================= ")
         # for i in range(10):
         #     print(" these are all the available spawn points ", spawn_points[i])
         # spawn_point = spawn_points[179]
         spawn_point = Transform(Location(x=self.init_positionx, y=self.init_positiony, z=2.5), Rotation(pitch=1, yaw=180, roll=1))
         self.player = self.world.try_spawn_actor(blueprint, spawn_point)
     # Set up the sensors.
     self.collision_sensor = CollisionSensor(self.player, self.hud)
     self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)
     self.gnss_sensor = GnssSensor(self.player)
     self.camera_manager = CameraManager(self.player, self.hud)
     self.camera_manager.transform_index = cam_pos_index
     self.camera_manager.set_sensor(cam_index, notify=False)
     actor_type = get_actor_display_name(self.player)
     self.hud.notification(actor_type)
Example #12
    def from_simulator_transform(cls, transform):
        """Creates a pylot transform from a simulator transform.

        Args:
            transform: A simulator transform.

        Returns:
            :py:class:`.Transform`: An instance of a pylot transform.
        """
        from carla import Transform
        if not isinstance(transform, Transform):
            raise ValueError('transform should be of type Transform')
        return cls(Location.from_simulator_location(transform.location),
                   Rotation.from_simulator_rotation(transform.rotation))
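Combined with as_simulator_transform from Example #10, this gives a simple round trip between simulator and pylot transforms. A brief sketch, assuming a connected `world` handle:

sim_transform = world.get_spectator().get_transform()   # carla.Transform
pylot_transform = Transform.from_simulator_transform(sim_transform)
back = pylot_transform.as_simulator_transform()
assert abs(back.location.x - sim_transform.location.x) < 1e-5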
Example #13
    def routing_callback(self, msg):
        if msg.status.error_code == ErrorCode.OK:
            self.planned_trajectory = None
            distance_threshold = 0.5

            ego_location = self.carla_actor.get_transform().location
            x0 = ego_location.x
            y0 = -ego_location.y

            apollo_wp = msg.routing_request.waypoint[0]
            x1 = apollo_wp.pose.x
            y1 = apollo_wp.pose.y

            dist = np.linalg.norm([x1 - x0, y1 - y0])
            if dist > distance_threshold:
                carla_wp = self.parent.map.get_waypoint(Location(x1, -y1, 0))
                self.carla_actor.set_transform(carla_wp.transform)
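The y sign flips reflect that Apollo uses a right-handed map frame while CARLA's frame is left-handed; converting a point in either direction just negates y, and the flip is its own inverse. A minimal illustrative sketch (the helper names are assumptions, not part of the bridge code):

from carla import Location


def apollo_to_carla_location(x, y, z=0.0):
    # Apollo (right-handed) -> CARLA (left-handed): negate y.
    return Location(x, -y, z)


def carla_to_apollo_xy(location):
    # CARLA -> Apollo: negate y again.
    return location.x, -location.y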
Example #14
    def transform_locations(self, locations):
        """Transforms the given set of locations (specified in the coordinate
        space of the current transform) to be in the world coordinate space.

        This method has the same functionality as transform_points, and
        is provided for convenience; when dealing with a large number of
        points, it is advised to use transform_points to avoid the slow
        conversion between a numpy array and list of locations.

        Args:
            locations (list(:py:class:`.Location`)): List of locations.

        Returns:
            list(:py:class:`.Location`): List of transformed points.
        """
        points = np.array([loc.as_numpy_array() for loc in locations])
        transformed_points = self.__transform(points, self.matrix)
        return [Location(x, y, z) for x, y, z in transformed_points]
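A brief usage sketch (assuming the pylot Transform, Location, and Rotation classes; the sensor pose is an arbitrary example): points expressed in a sensor's local frame are mapped into world coordinates.

# A sensor 2 m ahead of the world origin, 1.4 m up, yawed 90 degrees.
sensor_transform = Transform(Location(2.0, 0.0, 1.4), Rotation(0, 90, 0))

# Two points given in the sensor's own coordinate frame.
local_points = [Location(1.0, 0.0, 0.0), Location(0.0, 1.0, 0.0)]
for p in sensor_transform.transform_locations(local_points):
    print(p.x, p.y, p.z)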
Example #15
def main():
    global world
    client = Client('localhost', 2000)
    world = client.get_world()
    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = 1.0 / 10
    world.apply_settings(settings)

    # Spawn the vehicle.
    vehicle = spawn_driving_vehicle(client, world)

    # Spawn the camera and register a function to listen to the images.
    camera = spawn_rgb_camera(world, Location(x=2.0, y=0.0, z=1.8),
                              Rotation(roll=0, pitch=0, yaw=0), vehicle)
    camera.listen(process_images)
    world.tick()
    return vehicle, camera, world
Example #16
def get_lane_marker_linestring_from_right_lane_road_and_lane_id(
        world, road_id, lane_id):

    all_waypoints = world.get_map().generate_waypoints(3)

    filtered_waypoints = []
    for wp in all_waypoints:

        if wp.lane_id == lane_id and wp.road_id == road_id:
            filtered_waypoints.append(wp)

    first_wp = filtered_waypoints[0]

    next_wps = []
    for i in range(150):
        next_wps.append(first_wp.next(3)[0])
        first_wp = first_wp.next(3)[0]

    lane_marker_locations = []
    for wp in next_wps:

        right_vector = wp.lane_width / 2 * getRightVector(
            wp.transform.rotation)
        lane_marker_location = Location(
            x=wp.transform.location.x + right_vector.x,
            y=wp.transform.location.y - right_vector.y,
            z=wp.transform.location.z,
        )

        lane_marker_locations.append(lane_marker_location)

    for loc in lane_marker_locations:

        world.debug.draw_string(
            loc,
            "o",
            draw_shadow=False,
            color=carla.Color(r=0, g=255, b=0),
            life_time=10,
        )

    linestring = LineString([[loc.x, loc.y] for loc in lane_marker_locations])

    return linestring
Example #17
    def get_traffic_light_waypoints(self, traffic_light):
        """
        get area of a given traffic light
        """
        base_transform = traffic_light.get_transform()
        base_rot = base_transform.rotation.yaw
        area_loc = base_transform.transform(
            traffic_light.trigger_volume.location)

        # Discretize the trigger box into points
        area_ext = traffic_light.trigger_volume.extent
        x_values = np.arange(-0.9 * area_ext.x, 0.9 * area_ext.x,
                             1.0)  # 0.9 to avoid crossing to adjacent lanes

        area = []
        for x in x_values:

            point = Vector3D(
                x, 0, area_ext.z).rotate(base_rot).as_simulator_vector()
            point_location = area_loc + Location(x=point.x, y=point.y)
            area.append(point_location)

        # Get the waypoints of these points, removing duplicates
        ini_wps = []
        for pt in area:
            wpx = self._map.get_waypoint(pt)
            # As x_values are arranged in order, only the last one has
            # to be checked
            if not ini_wps or ini_wps[-1].road_id != wpx.road_id or ini_wps[
                    -1].lane_id != wpx.lane_id:
                ini_wps.append(wpx)

        # Advance them until the intersection
        wps = []
        for wpx in ini_wps:
            while not wpx.is_intersection:
                next_wp = wpx.next(0.5)[0]
                if next_wp and not next_wp.is_intersection:
                    wpx = next_wp
                else:
                    break
            wps.append(wpx)

        return area_loc, wps
Example #18
 def setup_sensors(self, ego_vehicle):
     camera_bp = self.bp_lib.find('sensor.camera.rgb')
     camera_bp.set_attribute('image_size_x', '800')
     camera_bp.set_attribute('image_size_y', '600')
     camera_bp.set_attribute('fov', '100')
     self.camera = self.world.try_spawn_actor(
                     camera_bp,
                     Transform(Location(x=0.8,y=0.0,z=1.7), Rotation(yaw=0.0)), 
                     attach_to=ego_vehicle)
     self.actor.append(self.camera)
     collision_bp = self.bp_lib.find('sensor.other.collision')
     self.collision = self.world.try_spawn_actor(
                     collision_bp,
                     Transform(),
                     attach_to=self.ego_vehicle)
     self.actor.append(self.collision)
     self.image_queue = queue.Queue()
     self.camera.listen(self.image_queue.put)
     self.collision_queue = queue.Queue()
     self.collision.listen(self.collision_queue.put)
Example #19
 def __init__(self, parent_actor, name, directory, H, W):
     """Constructor method"""
     # self.sensor = None
     self._parent = parent_actor
     self.recording = False
     self.directory = directory
     self.name = name
     self.BEV = np.zeros((3, H, W), dtype=np.uint8)
     world = self._parent.get_world()
     blueprint = world.get_blueprint_library().find(
         'sensor.camera.semantic_segmentation')  #semantic_segmentation
     blueprint.set_attribute('image_size_x', str(W))
     blueprint.set_attribute('image_size_y', str(H))
     blueprint.set_attribute('fov', '90')
     # Set the time in seconds between sensor captures
     blueprint.set_attribute('sensor_tick', '0')
     transform = Transform(Location(z=15), Rotation(pitch=-90))
     self.sensor = world.spawn_actor(blueprint,
                                     transform,
                                     attach_to=self._parent)
     # We need to pass the lambda a weak reference to
     # self to avoid circular reference.
     weak_self = weakref.ref(self)
     self.sensor.listen(lambda image: Camera._parse_image(weak_self, image))
Example #20
    def restart(self):
        # Keep same camera config if the camera manager exists.
        cam_index = self.camera_manager.index if self.camera_manager is not None else 0
        cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
        # Get a random blueprint.
        # ****** This random.choice has been restricted to a single option --> vehicle.bmw.grandtourer
        blueprint = random.choice(self.world.get_blueprint_library().filter(
            self._actor_filter))
        # print("blue print")
        # print(self.world.get_blueprint_library().filter(self._actor_filter))
        blueprint.set_attribute('role_name', 'hero')
        if blueprint.has_attribute('color'):
            color = random.choice(
                blueprint.get_attribute('color').recommended_values)
            # print("color")
            # print(blueprint.get_attribute('color').recommended_values)
            # ****** The blueprint color is set to white ('255,255,255')
            blueprint.set_attribute('color', '255,255,255')
        batch = []
        blueprint2 = random.choice(self.world.get_blueprint_library().filter(
            self._actor_filter))
        blueprint2.set_attribute('role_name', 'hero2')
        if blueprint2.has_attribute('color'):
            blueprint2.set_attribute('color', '0,0,0')
        # batch.append(SpawnActor(blueprint, transform))

        # Spawn the player.

        #     ******************* change positions ****************************************
        while self.player1 is None:
            # spawn_points = self.map.get_spawn_points()
            # print("length of spawn_points is %d" % len(spawn_points) + ", just choose one")  # 257, just choose one
            x_rand = random.randint(18000, 23000)
            x_rand_v2 = random.randint(x_rand + 1000, 25000)
            x_rand = x_rand / 100.0
            x_rand_v2 = x_rand_v2 / 100.0
            y_rand = random.randint(12855, 13455)
            y_rand = y_rand / 100.0
            x_speed_randon_v2 = random.randint(100, 3000)
            if x_speed_randon_v2 - 1000 > 0:
                x_speed_player = x_speed_randon_v2 - 100

            else:
                x_speed_player = 0
            x_speed_player = x_speed_player / 100

            x_speed_randon_v2 = x_speed_randon_v2 / 100
            spawn_point1 = Transform(
                Location(x=x_rand, y=y_rand, z=1.320625),
                Rotation(pitch=0.000000, yaw=179.999756, roll=0.000000))
            # ********************************* end *****************************************
            self.player1 = self.world.try_spawn_actor(blueprint, spawn_point1)
            spawn_point2 = Transform(
                Location(x=x_rand_v2, y=y_rand, z=1.320625),
                Rotation(pitch=0.000000, yaw=179.999756, roll=0.000000))
            if self.vehicle2 is None:
                # print("*****************")
                self.vehicle2 = self.world.try_spawn_actor(
                    blueprint2, spawn_point2)
                self.vehicle2.set_velocity(
                    carla.Vector3D(x=-x_speed_randon_v2, y=0.00000,
                                   z=0.000000))
                self.player1.set_velocity(
                    carla.Vector3D(x=-x_speed_player, y=0.00000, z=0.00000))

        # Set up the sensors.
        self.collision_sensor = CollisionSensor(self.player1, self.hud)
        self.lane_invasion_sensor = LaneInvasionSensor(self.player1, self.hud)
        self.gnss_sensor = GnssSensor(self.player1)
        self.camera_manager = CameraManager(self.player1, self.hud)
        self.camera_manager.transform_index = cam_pos_index
        self.camera_manager.set_sensor(cam_index, notify=False)
        actor_type = get_actor_display_name(self.player1)
        self.hud.notification(actor_type)
Example #21
def main(x,y,z,pitch,yaw,roll,sensor_id,port,global_file):
    img_size = np.asarray([960, 540], dtype=int)
    #pygame.init()
    #display = pygame.display.set_mode((VIEW_WIDTH, VIEW_HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF)
    #font = get_font()
    #clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000,1)
    client.set_timeout(20.0)
    # cam_subset=1
    #client.reload_world(False)
    #world = client.get_world()
    world = client.load_world('Town03')
    # weather = carla.WeatherParameters(
    #     cloudiness=0.0,
    #     precipitation=0.0,
    #     sun_altitude_angle=50.0)

    
    #world.set_weather(all_weather[weather_num])
    world.set_weather(getattr(carla.WeatherParameters, "ClearNoon"))
    
    
    # Add the other vehicles
    #Spawn_the_vehicles(world,client,car_num[0])
    ########### Camera parameters
    # cam_type='cam1'
    
    # with open("/media/wuminghu/hard/hard/carla/cam.txt") as csv_file:
    #     reader = csv.reader(csv_file, delimiter=' ')
    #     for line, row in enumerate(reader):
    #         if row[0] in cam_type:
    #             w_x,w_y,w_yaw,cam_H,cam_pitch=[float(i) for i in row[1:]]
    #             break
    ########### Camera parameters
    # w_x,w_y,w_yaw,cam_H,cam_pitch=-116,100,100,4,90

    actor_list = []
    rgb_list = []
    camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
    sizex = str(960)
    sizey = str(540)
    fovcfg = str(90)
    camera_bp.set_attribute('image_size_x',sizex )
    camera_bp.set_attribute('image_size_y', sizey)
    camera_bp.set_attribute('fov', fovcfg)
    semseg_bp = world.get_blueprint_library().find('sensor.camera.semantic_segmentation')
    semseg_bp.set_attribute('image_size_x', sizex)
    semseg_bp.set_attribute('image_size_y', sizey)
    semseg_bp.set_attribute('fov', fovcfg)

    out_path="/home/ubuntu/xwp/datasets/multi_view_dataset/new_test"
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    
    cam_point = Transform(Location(x=x, y=y, z=z),
                Rotation(pitch=pitch, yaw=yaw, roll=roll))

    
    camera_rgb = world.spawn_actor(camera_bp,cam_point)
    camera_seg = world.spawn_actor(semseg_bp,cam_point)
    camera_rgb.sensor_name = sensor_id
    camera_seg.sensor_name = sensor_id + "_seg"  
    actor_list.append(camera_rgb)
    actor_list.append(camera_seg)
    rgb_list.append(camera_rgb)
    print('spawned: ',camera_rgb.sensor_name)

    vehicles_actor = Spawn_the_vehicles(world,client,car_num[0],port)
    print('spawned {} vehicles'.format(len(vehicles_actor)))
    ######################################################file
    with CarlaSyncMode(world, *actor_list, fps=20) as sync_mode:
        count=0
        k=0
        while count<60:
        # while count<100:
            count+=1
            #clock.tick()
            # Advance the simulation and wait for the data.
            #snapshot, image_rgb,image_semseg = sync_mode.tick(timeout=2.0)
            blobs = sync_mode.tick(timeout=2.0)
            if count%10!=0:
                continue
            k+=1
            print(k)

            # img.append(image_rgb)
            #image=image_rgb
            # image1=image_semseg.convert(ColorConverter.CityScapesPalette)
            # import pdb; pdb.set_trace()
            all_images = blobs[1:]
            #images = all_images
            images = all_images[::2]
            semseg_images = all_images[1::2]
            snapshot = blobs[0]
            fps = round(1.0 / snapshot.timestamp.delta_seconds)

            
            world_snapshot = world.get_snapshot()

            actual_actor=[world.get_actor(actor_snapshot.id) for actor_snapshot in world_snapshot]
            got_vehicles=[actor for actor in actual_actor if actor.type_id.find('vehicle')!=-1]

            vehicles_list = []
            for camera_rgb in rgb_list:
                vehicles=[vehicle for vehicle in got_vehicles if vehicle.get_transform().location.distance(camera_rgb.get_transform().location)<65]
                vehicles_list.append(vehicles)
                cam_path = out_path + "/{}".format(camera_rgb.sensor_name)
                if not os.path.exists(cam_path):
                    os.makedirs(cam_path)
                    os.makedirs(cam_path+"/label_2")
                    os.makedirs(cam_path+"/image_2")
                    os.makedirs(cam_path+"/calib")
            # debug = world.debug
            # for vehicle in vehicles:
            #     debug.draw_box(carla.BoundingBox(vehicle.get_transform().location+vehicle.bounding_box.location, vehicle.bounding_box.extent), vehicle.get_transform().rotation, 0.05, carla.Color(255,0,0,0), life_time=0.05)
            # import pdb; pdb.set_trace()
            all_vehicles_list = []
            
            for i,(camera_rgb, vehicles,image,osemseg) in enumerate(zip(rgb_list,vehicles_list,images,semseg_images)):
                v=[] #filtered_vehicles

                # Convert semseg to cv.image
                semseg = np.frombuffer(osemseg.raw_data, dtype=np.dtype("uint8"))
                semseg = np.reshape(semseg, (osemseg.height, osemseg.width, 4))
                semseg = semseg[:, :, :3]
                semseg = semseg[:, :, ::-1]
                # BGR
                # Done

                label_files=open("{}/{}/label_2/{:0>6d}.txt".format(out_path,camera_rgb.sensor_name,k), "w")
                car_2d_bbox=[]
                for car in vehicles:
                    extent = car.bounding_box.extent
                    ###############location
                    car_location =car.bounding_box.location
                    # car_location1=car.get_transform().location
                    cords = np.zeros((1, 4))
                    cords[0, :]=np.array([car_location.x,car_location.y,car_location.z, 1])
                    cords_x_y_z = ClientSideBoundingBoxes._vehicle_to_sensor(cords, car, camera_rgb)[:3, :]
                    cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])
                    ###############location
                    bbox = np.transpose(np.dot(camera_coordinate(camera_rgb), cords_y_minus_z_x))
                    camera_bbox = (np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1))
                    # pdb.set_trace()
                    camera_bbox_e = camera_bbox[0]
                    # if camera_bbox_e[:,0]<0 or camera_bbox_e[:,1]<0 or camera_bbox_e[:,2]<0:
                    #     continue
                    if camera_bbox_e[:,2]<0:
                        continue
                    
                    #bboxe = camera_bbox
                    xmin,ymin,xmax,ymax=0,0,0,0
                    bboxe = ClientSideBoundingBoxes.get_bounding_boxes([car], camera_rgb)
                    if len(bboxe)==0:
                        continue
                    bboxe=bboxe[0]
                    bbox_in_image = (np.min(bboxe[:,0]), np.min(bboxe[:,1]), np.max(bboxe[:,0]), np.max(bboxe[:,1]))
                    bbox_crop = tuple(max(0, b) for b in bbox_in_image)
                    bbox_crop = (min(img_size[0], bbox_crop[0]),
                                min(img_size[1], bbox_crop[1]),
                                min(img_size[0], bbox_crop[2]),
                                min(img_size[1], bbox_crop[3]))
                    if bbox_crop[0] >= bbox_crop[2] or bbox_crop[1] >= bbox_crop[3]:
                        continue
                    #t_points = [(int(bboxe[i, 0]), int(bboxe[i, 1])) for i in range(8)]
                    # width_x=[int(bboxe[i, 0]) for i in range(8)]
                    # high_y=[int(bboxe[i, 1]) for i in range(8)]
                    # xmin,ymin,xmax,ymax=min(width_x),min(high_y),max(width_x),max(high_y)
                    xmin,ymin,xmax,ymax=bbox_crop
                    # x_cen=(xmin+xmax)/2
                    # y_cen=(ymin+ymax)/2
                    # if x_cen<0 or y_cen<0 or x_cen>VIEW_WIDTH or y_cen>VIEW_HEIGHT:
                    #     continue
                    car_type, truncated, occluded, alpha= 'Car', 0, 0,0
                    dh, dw,dl=extent.z*2,extent.y*2,extent.x*2
                    cords_y_minus_z_x=np.array(cords_y_minus_z_x)
                    ly, lz,lx=cords_y_minus_z_x[0][0],cords_y_minus_z_x[1][0],cords_y_minus_z_x[2][0]
                    lz=lz+dh
                    ry=(car.get_transform().rotation.yaw-camera_rgb.get_transform().rotation.yaw+90)*np.pi/180
                    check_box=False
                    ofs = 10
                    for one_box in car_2d_bbox:
                        xmi,ymi,xma,yma=one_box
                        if xmin>xmi-ofs and ymin>ymi-ofs and xmax<xma+ofs and ymax<yma+ofs:
                            check_box=True
                    if check_box or np.sqrt(ly**2+lx**2)<2:
                        continue

                    # bbox_crop = tuple(max(0, b) for b in [xmin,ymin,xmax,ymax])
                    # bbox_crop = (min(img_size[0], bbox_crop[0]),
                    #             min(img_size[1], bbox_crop[1]),
                    #             min(img_size[0], bbox_crop[2]),
                    #             min(img_size[1], bbox_crop[3]))
                    # Use segment image to determine whether the vehicle is occluded.
                    # See https://carla.readthedocs.io/en/0.9.11/ref_sensors/#semantic-segmentation-camera
                    # print('seg: ',semseg[int((ymin+ymax)/2),int((xmin+xmax)/2),0])
                    # print('x: ',int((ymin+ymax)/2),'y: ',int((xmin+xmax)/2))
                    if semseg[int((ymin+ymax)/2),int((xmin+xmax)/2),0] != 10:
                        continue

                    car_2d_bbox.append(bbox_crop)
                    
                    v.append(car)
                    if car not in all_vehicles_list:
                        all_vehicles_list.append(car)
                    if ry>np.pi:
                        ry-=np.pi
                    if ry<-np.pi:
                        ry+=np.pi
                    # pdb.set_trace()
                    txt="{} {} {} {} {} {} {} {} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {} {}\n".format(car_type, truncated, occluded, alpha, xmin, ymin, xmax, ymax, dh, dw,
                        dl,ly, lz, lx,  ry,car.id)
                    label_files.write(txt)
                label_files.close()
                #print(cam_type,cam_subset,k,len(v))

                #bounding_boxes = ClientSideBoundingBoxes.get_bounding_boxes(v, camera_rgb) 
                ######################################################file
                # pygame.image.save(display,'%s/image_1/%06d.png' % (out_path,k))       
                image.save_to_disk('{}/{}/image_2/{:0>6d}.png'.format (out_path,camera_rgb.sensor_name,k))        
            
            global_file_path = out_path + '/global_label_2'
            if not os.path.exists(global_file_path):
                os.makedirs(global_file_path)
            global_vehicle_file = open("{}/{:0>6d}.txt".format(global_file_path,k), "w")
            for vehicle in all_vehicles_list:
                extent = vehicle.bounding_box.extent
                car_type, truncated, occluded, alpha,xmin,ymin,xmax,ymax= 'Car', 0, 0,0,0,0,0,0
                dh, dw,dl=extent.z*2,extent.y*2,extent.x*2
                location  = vehicle.get_transform().location
                ly,lz,lx = location.x,location.y,location.z
                ry = vehicle.get_transform().rotation.yaw * np.pi / 180
                
                txt="{} {} {} {} {} {} {} {} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {} {}\n".format(car_type, truncated, occluded, alpha, xmin, ymin, xmax, ymax, dh, dw,
                            dl,ly, lz, lx,  ry_filter(ry),vehicle.id)
                global_vehicle_file.write(txt)
            global_vehicle_file.close()


    for camera_rgb in rgb_list:
        calib_files=open("{}/{}/calib/{:0>6d}.txt".format(out_path,camera_rgb.sensor_name,0), "w")
        K=camera_coordinate(camera_rgb)

        txt="P2: {} {} {} {} {} {} {} {} {}\n".format(  K[0][0],K[0][1],K[0][2],
                                                    K[1][0],K[1][1],K[1][2],
                                                    K[2][0],K[2][1],K[2][2])
        calib_files.write(txt)
        calib_files.close()

    print('destroying actors.')
    for actor in actor_list:
        actor.destroy()
    
    # world_snapshot = world.get_snapshot()
    # vehicles_actors = []
    # for vehicle in vehicles_actor:
    #     if world_snapshot.find(vehicle) is not None:
    #         vehicles_actors.append(vehicle)
    # for vehicle in vehicles_actor:
    #client.apply_batch([carla.command.DestroyActor(vehicle) for vehicle in vehicles_actors])

    #pygame.quit()
    return
Example #22
 def __update_spectactor_pose(self):
     # Set the world simulation view with respect to the vehicle.
     v_pose = self._ego_vehicle.get_transform()
     v_pose.location -= 10 * Location(v_pose.get_forward_vector())
     v_pose.location.z = 5
     self._spectator.set_transform(v_pose)
Example #23
    def Create_actors(self,world, blueprint_library): 
        ego_list = []
        npc_list = []
        obstacle_list = []
        sensor_list = []
        # Ego vehicle setup ---------------------------------------------------------
        ego_bp = blueprint_library.find(id='vehicle.lincoln.mkz2017')
        # Build the spawn transform
        ego_transform = Transform(Location(x=30, y=-4.350967, z=0.1), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        # Spawn the vehicle from the blueprint at this transform
        ego = world.spawn_actor(ego_bp, ego_transform)
        ego_list.append(ego)
        print('created %s' % ego.type_id)

        # Spectator setup -----------------------------------------------------------
        spectator = world.get_spectator()
        spec_transform = Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        spec_transform.location += carla.Location(x=-15,z=50)
        spec_transform.rotation = carla.Rotation(pitch=-90,yaw=0,roll=-0.000000)
        spectator.set_transform(spec_transform)

        # NPC setup -----------------------------------------------------------------
        npc_transform = Transform(Location(x=30, y=-4.350967, z=0.1), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        for i in range(1):
            npc_transform.location += carla.Location(x=-11,y=6)
            npc_transform.rotation = carla.Rotation(pitch=0.348271,yaw=275, roll=-0.000000)
            npc_bp = blueprint_library.find(id='vehicle.lincoln.mkz2017')
            # print(npc_bp.get_attribute('color').recommended_values)
            npc_bp.set_attribute('color', '229,28,0')
            npc = world.try_spawn_actor(npc_bp, npc_transform)
            if npc is None:
                print('failed to create npc %s' % i)
            else:
                npc_list.append(npc)
                print('created %s' % npc.type_id)

        # Obstacle setup ------------------------------------------------------------
        obsta_bp = blueprint_library.find(id='static.prop.streetbarrier')
        # Obstacle 1
        obstacle_transform1 =Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        obstacle_transform1.location += carla.Location(x=14,y=1.8)
        obstacle_transform1.rotation = carla.Rotation(pitch=0, yaw=0, roll=0.000000)
        for i in range(10):
            obstacle1 = world.try_spawn_actor(obsta_bp, obstacle_transform1)
            obstacle_transform1.location += carla.Location(x=-2.5,y=-0.04)
            obstacle_list.append(obstacle1)
        # Obstacle 2
        obstacle_transform2 =Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        obstacle_transform2.location += carla.Location(x=-8.5,y=1.82)
        obstacle_transform2.rotation = carla.Rotation(pitch=0, yaw=275, roll=0.000000)
        for i in range(4):
            obstacle2 = world.try_spawn_actor(obsta_bp, obstacle_transform2)
            obstacle_transform2.location += carla.Location(x=-0.1,y=2.5)
            obstacle_list.append(obstacle2)
        # Obstacle 3
        obstacle_transform3 =Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        obstacle_transform3.location += carla.Location(x=13,y=-1.8)
        obstacle_transform3.rotation = carla.Rotation(pitch=0, yaw=0, roll=0.000000)
        for i in range(9):
            obstacle3 = world.try_spawn_actor(obsta_bp, obstacle_transform3)
            obstacle_transform3.location += carla.Location(x=-2.5,y=-0.1)
            obstacle_list.append(obstacle3)
        # Obstacle 4
        obstacle_transform4 =Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        obstacle_transform4.location += carla.Location(x=-24,y=-52)
        obstacle_transform4.rotation = carla.Rotation(pitch=0, yaw=270, roll=0.000000)
        for i in range(15):
            obstacle4 = world.try_spawn_actor(obsta_bp, obstacle_transform4)
            obstacle_transform4.location += carla.Location(x=-0.05,y=2.5)
            obstacle_list.append(obstacle4)
        # Obstacle 5
        obstacle_transform5 =Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        obstacle_transform5.location += carla.Location(x=-20.5,y=-52)
        obstacle_transform5.rotation = carla.Rotation(pitch=0, yaw=270, roll=0.000000)
        for i in range(13):
            obstacle5 = world.try_spawn_actor(obsta_bp, obstacle_transform5)
            obstacle_transform5.location += carla.Location(x=-0.05,y=2.5)
            obstacle_list.append(obstacle5)
        # Obstacle 6
        obstacle_transform6 =Transform(Location(x=30, y=-4.350967, z=0), 
                    Rotation(pitch=-0.348271, yaw=180, roll=-0.000000))
        obstacle_transform6.location += carla.Location(x=-20,y=-21)
        obstacle_transform6.rotation = carla.Rotation(pitch=0, yaw=45, roll=0.000000)
        for i in range(8):
            obstacle6 = world.try_spawn_actor(obsta_bp, obstacle_transform6)
            obstacle_transform6.location += carla.Location(x=1.7,y=2.4)
            obstacle_list.append(obstacle6)
        # Sensor setup --------------------------------------------------------------
        ego_collision = SS.CollisionSensor(ego)
        npc_collision = SS.CollisionSensor(npc)
        ego_invasion = SS.LaneInvasionSensor(ego)
        npc_invasion = SS.LaneInvasionSensor(npc)
        sensor_list.extend([[ego_collision,ego_invasion],[npc_collision,npc_invasion]])

        return ego_list,npc_list,obstacle_list,sensor_list
Example #24
    def Create_actors(self, world, blueprint_library):
        ego_list = []
        npc_list = []
        obstacle_list = []
        sensor_list = []
        # Ego vehicle setup ---------------------------------------------------------
        ego_bp = blueprint_library.find(id='vehicle.lincoln.mkz2017')
        # Build the spawn transform
        ego_transform = Transform(
            Location(x=160.341522, y=-371.640472, z=0.281942),
            Rotation(pitch=0.000000, yaw=0.500910, roll=0.000000))
        # Spawn the vehicle from the blueprint at this transform
        ego = world.spawn_actor(ego_bp, ego_transform)
        if ego is None:
            print('failed to create ego')
        else:
            ego_list.append(ego)
            print('created %s' % ego.type_id)

        # Spectator setup -----------------------------------------------------------
        spectator = world.get_spectator()
        # spec_transform = ego.get_transform()
        spec_transform = Transform(
            Location(x=140.341522, y=-375.140472, z=15.281942),
            Rotation(pitch=0.000000, yaw=0.500910, roll=0.000000))
        spec_transform.location += carla.Location(x=60, z=45)
        spec_transform.rotation = carla.Rotation(pitch=-90, yaw=90)
        spectator.set_transform(spec_transform)

        # NPC setup -----------------------------------------------------------------
        npc_transform = ego_transform
        for i in range(1):
            npc_transform.location += carla.Location(x=-15, y=-3.5)
            npc_bp = blueprint_library.find(id='vehicle.lincoln.mkz2017')
            npc_bp.set_attribute('color', '229,28,0')
            npc = world.try_spawn_actor(npc_bp, npc_transform)
            if npc is None:
                print('failed to create npc No.%s' % i)
            else:
                npc_list.append(npc)
                print('created %s' % npc.type_id)

        # Obstacle setup ------------------------------------------------------------
        obstacle_transform = ego_transform
        for i in range(28):
            if i == 0:
                obsta_bp = blueprint_library.find(
                    id='vehicle.mercedes-benz.coupe')
                obstacle_transform.location += carla.Location(x=95, y=3.8)
                obstacle = world.try_spawn_actor(obsta_bp, obstacle_transform)
                obstacle_transform.location += carla.Location(x=0, y=-5.3)
                if obstacle is None:
                    print('failed to create obstacle %s' % i)
                else:
                    obstacle_list.append(obstacle)
                    print('created %s' % obstacle.type_id)
            else:
                obsta_bp = blueprint_library.find(
                    id='static.prop.streetbarrier')
                obstacle_transform.location += carla.Location(x=-3.5, y=7.4)
                obstacle1 = world.try_spawn_actor(obsta_bp, obstacle_transform)
                obstacle_list.append(obstacle1)
                obstacle_transform.location += carla.Location(y=-7.4)
                obstacle2 = world.try_spawn_actor(obsta_bp, obstacle_transform)
                obstacle_list.append(obstacle2)

        # Sensor setup --------------------------------------------------------------
        ego_collision = SS.CollisionSensor(ego)
        npc_collision = SS.CollisionSensor(npc)
        # lane_invasion = SS.LaneInvasionSensor(ego)
        sensor_list.append(ego_collision)
        sensor_list.append(npc_collision)
        return ego_list, npc_list, obstacle_list, sensor_list
Example #25
 def __contains__(self, loc: carla.Location) -> bool:
     return loc.distance(self.center) <= self.radius
Example #26
import math

import carla
import numpy as np
import pandas as pd
from scipy.interpolate import splprep, splev

from carla import Transform, Location, Rotation

# Easy, self-explanatory lambdas
from config import IMAGE_SIZE

numpy_to_transform = lambda point: Transform(
    Location(point[0], point[1], point[2]),
    Rotation(yaw=point[3], pitch=0, roll=0))
transform_to_numpy = lambda transform: np.array([
    transform.location.x, transform.location.y, transform.location.z,
    transform.rotation.yaw
])
numpy_to_location = lambda point: Location(point[0], point[1], point[2])
location_to_numpy = lambda location: np.array(
    [location.x, location.y, location.z])
velocity_to_kmh = lambda v: float(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
numpy_to_velocity_vec = lambda v: carla.Vector3D(x=v[0], y=v[1], z=v[2])
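A small sketch of the conversion lambdas above (the point values are illustrative): a spawn point stored as a numpy row [x, y, z, yaw] converts to a carla.Transform and back without needing a running simulator.

point = np.array([10.0, -5.0, 0.3, 90.0])   # x, y, z, yaw
spawn_transform = numpy_to_transform(point)
assert np.allclose(transform_to_numpy(spawn_transform), point, atol=1e-4)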


def df_to_spawn_points(data: pd.DataFrame,
                       n: int = 10000,
                       invert: bool = False) -> np.array:
    '''
    Method converting spawnpoints loaded from DataFrame into equally placed points on the map.
Example #27
 def _distance(self, wp: msg.WaypointWithSpeedLimit,
               loc: carla.Location) -> float:
     wp_loc = carla.Location(wp.x, wp.y)
     return loc.distance(wp_loc)
Example #28
    def Create_actors(self, world, blueprint_library):
        ego_list = []
        npc_list = []
        obstacle_list = []
        sensor_list = []
        # Ego vehicle setup ---------------------------------------------------------
        ego_bp = blueprint_library.find(id='vehicle.lincoln.mkz2017')
        # Build the spawn transform
        ego_transform = Transform(Location(x=9, y=-110.350967, z=0.1),
                                  Rotation(pitch=0, yaw=-90, roll=-0.000000))
        # Spawn the vehicle from the blueprint at this transform
        ego = world.spawn_actor(ego_bp, ego_transform)
        ego_list.append(ego)
        print('created %s' % ego.type_id)

        # Spectator setup -----------------------------------------------------------
        spectator = world.get_spectator()
        # spec_transform = ego.get_transform()
        spec_transform = Transform(Location(x=9, y=-115.350967, z=0),
                                   Rotation(pitch=0, yaw=180, roll=-0.000000))
        spec_transform.location += carla.Location(x=-5, z=60)
        spec_transform.rotation = carla.Rotation(pitch=-90,
                                                 yaw=1.9,
                                                 roll=-0.000000)
        spectator.set_transform(spec_transform)

        # NPC setup -----------------------------------------------------------------
        npc_transform = Transform(Location(x=9, y=-110.350967, z=0.1),
                                  Rotation(pitch=0, yaw=-90, roll=-0.000000))
        for i in range(1):
            npc_transform.location += carla.Location(x=-18, y=-24)
            npc_transform.rotation = carla.Rotation(pitch=0,
                                                    yaw=0,
                                                    roll=-0.000000)
            npc_bp = blueprint_library.find(id='vehicle.lincoln.mkz2017')
            # print(npc_bp.get_attribute('color').recommended_values)
            npc_bp.set_attribute('color', '229,28,0')
            npc = world.try_spawn_actor(npc_bp, npc_transform)
            if npc is None:
                print('failed to create npc %s' % i)
            else:
                npc_list.append(npc)
                print('created %s' % npc.type_id)

        # Obstacle setup ------------------------------------------------------------
        obsta_bp = blueprint_library.find(id='static.prop.streetbarrier')
        # Obstacle 1
        obstacle_transform1 = Transform(
            Location(x=9, y=-110.350967, z=0),
            Rotation(pitch=0, yaw=-90, roll=-0.000000))
        obstacle_transform1.location += carla.Location(x=50, y=-27, z=3)
        obstacle_transform1.rotation = carla.Rotation(pitch=0,
                                                      yaw=0,
                                                      roll=0.000000)
        for i in range(30):
            obstacle1 = world.try_spawn_actor(obsta_bp, obstacle_transform1)
            obstacle_transform1.location += carla.Location(x=-2.5,
                                                           y=-0.05,
                                                           z=-0.12)
            obstacle_list.append(obstacle1)
        # Obstacle 2
        # obstacle_transform2 = Transform(Location(x=9, y=-110.350967,z=0),
        #             Rotation(pitch=0, yaw=-90, roll=-0.000000))
        # obstacle_transform2.location += carla.Location(x=-2.8,y=-18.5)
        # obstacle_transform2.rotation = carla.Rotation(pitch=0, yaw=0, roll=0.000000)
        # for i in range(7):
        #     obstacle2 = world.try_spawn_actor(obsta_bp, obstacle_transform2)
        #     obstacle_transform2.location += carla.Location(x=-2.5)
        #     obstacle_list.append(obstacle2)
        # # Obstacle 3
        # obstacle_transform3 = Transform(Location(x=9, y=-110.350967,z=0),
        #             Rotation(pitch=0, yaw=-90, roll=-0.000000))
        # obstacle_transform3.location += carla.Location(x=-1.65,y=-17.5)
        # obstacle_transform3.rotation = carla.Rotation(pitch=0, yaw=90, roll=0.000000)
        # for i in range(10):
        #     obstacle3 = world.try_spawn_actor(obsta_bp, obstacle_transform3)
        #     obstacle_transform3.location += carla.Location(x=0.006,y=2.5)
        #     obstacle_list.append(obstacle3)
        # # Obstacle 4
        # obstacle_transform4 = Transform(Location(x=9, y=-110.350967,z=0),
        #             Rotation(pitch=0, yaw=-90, roll=-0.000000))
        # obstacle_transform4.location += carla.Location(x=2.45,y=-15)
        # obstacle_transform4.rotation = carla.Rotation(pitch=0, yaw=90, roll=0.000000)
        # for i in range(10):
        #     obstacle4 = world.try_spawn_actor(obsta_bp, obstacle_transform4)
        #     obstacle_transform4.location += carla.Location(y=2.5)
        #     obstacle_list.append(obstacle4)

        # Sensor setup --------------------------------------------------------------
        ego_collision = SS.CollisionSensor(ego)
        npc_collision = SS.CollisionSensor(npc)
        ego_invasion = SS.LaneInvasionSensor(ego)
        npc_invasion = SS.LaneInvasionSensor(npc)
        sensor_list.extend([[ego_collision, ego_invasion],
                            [npc_collision, npc_invasion]])

        return ego_list, npc_list, obstacle_list, sensor_list
Example #29
def get_top_down_transform(transform, top_down_camera_altitude):
    # Calculation relies on the fact that the camera's FOV is 90.
    top_down_location = (transform.location +
                         Location(0, 0, top_down_camera_altitude))
    return Transform(top_down_location, Rotation(-90, 0, 0))
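A worked note on the 90-degree assumption in the comment above: a camera pointed straight down from altitude h with a 90-degree horizontal FOV sees a ground strip 2 * h * tan(45 deg) = 2h meters wide, so the altitude directly fixes the meters-per-pixel scale of the top-down view. A tiny sketch (function name and values are illustrative):

import math


def top_down_ground_width(altitude_m, fov_deg=90.0):
    # Width of the ground strip visible across the image of a
    # straight-down camera at the given altitude.
    return 2.0 * altitude_m * math.tan(math.radians(fov_deg / 2.0))


print(top_down_ground_width(20.0))  # ~40 m across the image width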
Example #30
class Transform(object):
    """A class that stores the location and rotation of an obstacle.

    It can be created from a simulator transform, defines helper functions
    needed in Pylot, and makes the simulator transform serializable.

    A transform object is instantiated with either a location and a rotation,
    or using a matrix.

    Args:
        location (:py:class:`.Location`, optional): The location of the object
            represented by the transform.
        rotation (:py:class:`.Rotation`, optional): The rotation (in degrees)
            of the object represented by the transform.
        matrix: The transformation matrix used to convert points in the 3D
            coordinate space with respect to the location and rotation of the
            given object.

    Attributes:
        location (:py:class:`.Location`): The location of the object
            represented by the transform.
        rotation (:py:class:`.Rotation`): The rotation (in degrees) of the
            object represented by the transform.
        forward_vector (:py:class:`.Vector3D`): The forward vector of the
            object represented by the transform.
        matrix: The transformation matrix used to convert points in the 3D
            coordinate space with respect to the location and rotation of the
            given object.
    """
    def __init__(self,
                 location: Location = None,
                 rotation: Rotation = None,
                 matrix=None):
        if matrix is not None:
            self.matrix = matrix
            self.location = Location(matrix[0, 3], matrix[1, 3], matrix[2, 3])

            # Forward vector is retrieved from the matrix.
            self.forward_vector = \
                Vector3D(self.matrix[0, 0], self.matrix[1, 0],
                         self.matrix[2, 0])
            pitch_r = math.asin(np.clip(self.forward_vector.z, -1, 1))
            yaw_r = math.acos(
                np.clip(self.forward_vector.x / math.cos(pitch_r), -1, 1))
            roll_r = math.asin(
                np.clip(matrix[2, 1] / (-1 * math.cos(pitch_r)), -1, 1))
            self.rotation = Rotation(math.degrees(pitch_r),
                                     math.degrees(yaw_r), math.degrees(roll_r))
        else:
            self.location, self.rotation = location, rotation
            self.matrix = Transform._create_matrix(self.location,
                                                   self.rotation)

            # Forward vector is retrieved from the matrix.
            self.forward_vector = \
                Vector3D(self.matrix[0, 0], self.matrix[1, 0],
                         self.matrix[2, 0])

    @classmethod
    def from_simulator_transform(cls, transform):
        """Creates a pylot transform from a simulator transform.

        Args:
            transform: A simulator transform.

        Returns:
            :py:class:`.Transform`: An instance of a pylot transform.
        """
        from carla import Transform
        if not isinstance(transform, Transform):
            raise ValueError('transform should be of type Transform')
        return cls(Location.from_simulator_location(transform.location),
                   Rotation.from_simulator_rotation(transform.rotation))

    @staticmethod
    def _create_matrix(location, rotation):
        """Creates a transformation matrix to convert points in the 3D world
        coordinate space with respect to the object.

        Use the transform_points function to transform a given set of points
        with respect to the object.

        Args:
            location (:py:class:`.Location`): The location of the object
                represented by the transform.
            rotation (:py:class:`.Rotation`): The rotation of the object
                represented by the transform.

        Returns:
            A 4x4 numpy matrix which represents the transformation matrix.
        """
        matrix = np.identity(4)
        cy = math.cos(np.radians(rotation.yaw))
        sy = math.sin(np.radians(rotation.yaw))
        cr = math.cos(np.radians(rotation.roll))
        sr = math.sin(np.radians(rotation.roll))
        cp = math.cos(np.radians(rotation.pitch))
        sp = math.sin(np.radians(rotation.pitch))
        matrix[0, 3] = location.x
        matrix[1, 3] = location.y
        matrix[2, 3] = location.z
        matrix[0, 0] = (cp * cy)
        matrix[0, 1] = (cy * sp * sr - sy * cr)
        matrix[0, 2] = -1 * (cy * sp * cr + sy * sr)
        matrix[1, 0] = (sy * cp)
        matrix[1, 1] = (sy * sp * sr + cy * cr)
        matrix[1, 2] = (cy * sr - sy * sp * cr)
        matrix[2, 0] = (sp)
        matrix[2, 1] = -1 * (cp * sr)
        matrix[2, 2] = (cp * cr)
        return matrix
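
    # The rotation block above is, algebraically, Rz(yaw) @ Ry(-pitch) @
    # Rx(-roll) built from the standard right-handed elemental rotations,
    # with the translation stored in the last column (this appears to mirror
    # the simulator's Unreal-style rotation order).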

    def __transform(self, points, matrix):
        """Internal function to transform the points according to the
        given matrix. This function either converts the points from
        coordinate space relative to the transform to the world coordinate
        space (using self.matrix), or from world coordinate space to the
        space relative to the transform (using inv(self.matrix)).

        Args:
            points: An n by 3 numpy array, where each row is the
                (x, y, z) coordinates of a point.
            matrix: The matrix of the transformation to apply.

        Returns:
            An n by 3 numpy array of transformed points.
        """
        # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]].
        # So let's transpose the point matrix.
        points = points.transpose()

        # Add 1s row: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[1,..1]]
        points = np.append(points, np.ones((1, points.shape[1])), axis=0)

        # Point transformation (depends on the given matrix)
        points = np.dot(matrix, points)

        # Get all but the last row in array form.
        points = np.asarray(points[0:3].transpose()).astype(np.float16)

        return points

    def transform_points(self, points):
        """Transforms the given set of points (specified in the coordinate
        space of the current transform) to be in the world coordinate space.

        For example, if the transform is at location (3, 0, 0) and the
        location passed to the argument is (10, 0, 0), this function will
        return (13, 0, 0) i.e. the location of the argument in the world
        coordinate space.

        Args:
            points: A (number of points) by 3 numpy array, where each row is
                the (x, y, z) coordinates of a point.

        Returns:
            An n by 3 numpy array of transformed points.
        """
        return self.__transform(points, self.matrix)

    def inverse_transform_points(self, points):
        """Transforms the given set of points (specified in world coordinate
        space) to be relative to the given transform.

        For example, if the transform is at location (3, 0, 0) and the location
        passed to the argument is (10, 0, 0), this function will return
        (7, 0, 0) i.e. the location of the argument relative to the given
        transform.

        Args:
            points: A (number of points) by 3 numpy array, where each row is
                the (x, y, z) coordinates of a point.

        Returns:
            An n by 3 numpy array of transformed points.
        """
        return self.__transform(points, np.linalg.inv(self.matrix))

    def transform_locations(self, locations):
        """Transforms the given set of locations (specified in the coordinate
        space of the current transform) to be in the world coordinate space.

        This method has the same functionality as transform_points, and
        is provided for convenience; when dealing with a large number of
        points, it is advised to use transform_points to avoid the slow
        conversion between a numpy array and list of locations.

        Args:
            locations (list(:py:class:`.Location`)): List of locations.

        Returns:
            list(:py:class:`.Location`): List of transformed points.
        """
        points = np.array([loc.as_numpy_array() for loc in locations])
        transformed_points = self.__transform(points, self.matrix)
        return [Location(x, y, z) for x, y, z in transformed_points]

    def inverse_transform_locations(self, locations):
        """Transforms the given set of locations (specified in world coordinate
        space) to be relative to the given transform.

        This method has the same functionality as inverse_transform_points,
        and is provided for convenience; when dealing with a large number of
        points, it is advised to use inverse_transform_points to avoid the slow
        conversion between a numpy array and list of locations.

        Args:
            locations (list(:py:class:`.Location`)): List of locations.

        Returns:
            list(:py:class:`.Location`): List of transformed points.
        """

        points = np.array([loc.as_numpy_array() for loc in locations])
        transformed_points = self.__transform(points,
                                              np.linalg.inv(self.matrix))
        return [Location(x, y, z) for x, y, z in transformed_points]

    def as_simulator_transform(self):
        """Converts the transform to a simulator transform.

        Returns:
            An instance of the simulator class representing the Transform.
        """
        from carla import Location, Rotation, Transform
        return Transform(
            Location(self.location.x, self.location.y, self.location.z),
            Rotation(pitch=self.rotation.pitch,
                     yaw=self.rotation.yaw,
                     roll=self.rotation.roll))

    def get_angle_and_magnitude(self, target_loc):
        """Computes relative angle between the transform and a target location.

        Args:
            target_loc (:py:class:`.Location`): Location of the target.

        Returns:
            Angle in radians and vector magnitude.
        """
        target_vec = target_loc.as_vector_2D() - self.location.as_vector_2D()
        magnitude = target_vec.magnitude()
        if magnitude > 0:
            forward_vector = Vector2D(
                math.cos(math.radians(self.rotation.yaw)),
                math.sin(math.radians(self.rotation.yaw)))
            angle = target_vec.get_angle(forward_vector)
        else:
            angle = 0
        return angle, magnitude

    def is_within_distance_ahead(self, dst_loc: Location,
                                 max_distance: float) -> bool:
        """Checks if a location is within a distance.

        Args:
            dst_loc (:py:class:`.Location`): Location to compute distance to.
            max_distance (:obj:`float`): Maximum allowed distance.

        Returns:
            bool: True if other location is within max_distance.
        """
        d_angle, norm_dst = self.get_angle_and_magnitude(dst_loc)
        # The target is (almost) at the same position as the transform.
        if norm_dst < 0.001:
            return True
        # The target is farther away than the maximum allowed distance.
        if norm_dst > max_distance:
            return False
        return d_angle < 90.0

    def inverse_transform(self):
        """Returns the inverse transform of this transform."""
        new_matrix = np.linalg.inv(self.matrix)
        return Transform(matrix=new_matrix)

    def __mul__(self, other):
        new_matrix = np.dot(self.matrix, other.matrix)
        return Transform(matrix=new_matrix)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        if self.location:
            return "Transform(location: {}, rotation: {})".format(
                self.location, self.rotation)
        else:
            return "Transform({})".format(str(self.matrix))