Example #1
def main():
    argparser = argparse.ArgumentParser(
        description=__doc__)
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '--data_dir',
        metavar='D',
        default='lidar_output_def',
        help='Directory to save lidar data')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '--save_lidar_data',
        default=False,
        action='store_true',
        help='Save lidar point clouds to disk')
    argparser.add_argument(
        '--save_gt',
        default=False,
        action='store_true',
        help='Save ground-truth vehicle positions to disk')
    argparser.add_argument(
        '--town',
        default='Town03',
        help='Spawn in Town01, Town02 or Town03'
    )
    args = argparser.parse_args()

    shutil.rmtree(args.data_dir, ignore_errors=True) 
    Path(args.data_dir + '/lidar').mkdir(parents=True, exist_ok=True)

    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
    # keyboard = Keyboard(0.05)
    client = carla.Client(args.host, args.port)
    client.set_timeout(10.0)
    client.load_world(args.town)

    
    # Set synchronous mode
    # This is essential for the sensors to work properly
    world = client.get_world()
    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = 0.05  # FPS = 1/0.05 = 20
    world.apply_settings(settings)

    try:

        world = client.get_world()
        ego_vehicles = []
        ego_cam = None
        ego_col = None
        ego_lane = None
        ego_obs = None
        ego_gnss = None
        ego_imu = None

        # --------------
        # Start recording
        # --------------
        """
        client.start_recorder('~/tutorial/recorder/recording01.log')
        """

        # --------------
        # Spawn ego vehicle
        # --------------
        # vehicles_list = custom_spawn(world)


        num_vehicles = len(ego_transforms)
        ego_bps = []
        for i in range(num_vehicles):
            ego_bps.append(world.get_blueprint_library().find('vehicle.tesla.model3'))
        for i in range(num_vehicles):
            ego_bps[i].set_attribute('role_name', 'ego')
        print('\nEgo role_names are set')
        for i in range(num_vehicles):
            ego_color = random.choice(ego_bps[i].get_attribute('color').recommended_values)
            ego_bps[i].set_attribute('color', ego_color)
        print('\nEgo colors are set')

        # spawn_points = world.get_map().get_spawn_points()
        # number_of_spawn_points = len(spawn_points)        
        
        # spawn num_vehicles vehicles
        for i in range(num_vehicles):
            ego_vehicles.append(world.spawn_actor(ego_bps[i], ego_transforms[i]))
       
        # --------------
        # Add a new LIDAR sensor to my ego
        # --------------

        # Default
        # lidar_cam = None
        # lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast')
        # lidar_bp.set_attribute('channels',str(32))
        # lidar_bp.set_attribute('points_per_second',str(90000))
        # lidar_bp.set_attribute('rotation_frequency',str(40))
        # lidar_bp.set_attribute('range',str(20))
        # lidar_location = carla.Location(0,0,2)
        # lidar_rotation = carla.Rotation(0,0,0)
        # lidar_transform = carla.Transform(lidar_location,lidar_rotation)
        # lidar_sen = world.spawn_actor(lidar_bp,lidar_transform,attach_to=ego_vehicle)
        # lidar_sen.listen(lambda point_cloud: process_point_cloud(point_cloud, args.save_lidar_data))

        # VLP 16
        lidar_cam = None
        lidar_bp = world.get_blueprint_library().find('sensor.lidar.ray_cast')
        lidar_bp.set_attribute('channels',str(16))
        lidar_bp.set_attribute('rotation_frequency',str(20))  # must match the simulator FPS (1 / fixed_delta_seconds = 20 Hz)
        lidar_bp.set_attribute('range',str(50))
        lidar_bp.set_attribute('lower_fov', str(-15))
        lidar_bp.set_attribute('upper_fov', str(15))
        lidar_bp.set_attribute('points_per_second',str(300000))
        lidar_bp.set_attribute('dropoff_general_rate',str(0.0))

        
        # lidar_bp.set_attribute('noise_stddev',str(0.173))
        # lidar_bp.set_attribute('noise_stddev',str(0.141)) Works in this case 

        lidar_location = carla.Location(0,0,2)
        lidar_rotation = carla.Rotation(0,0,0)
        lidar_transform = carla.Transform(lidar_location,lidar_rotation)
        lidar_sen = world.spawn_actor(lidar_bp,lidar_transform,attach_to=ego_vehicles[0])
        lidar_sen.listen(lambda point_cloud: process_point_cloud(args, point_cloud, args.save_lidar_data))
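        # (Sketch, not part of the original) process_point_cloud() is defined elsewhere in
        # this script; a minimal callback consistent with the call above could be:
        #     def process_point_cloud(args, point_cloud, save_lidar_data):
        #         # CARLA >= 0.9.10 delivers the cloud as float32 (x, y, z, intensity) tuples
        #         points = np.frombuffer(point_cloud.raw_data, dtype=np.float32).reshape(-1, 4)
        #         if save_lidar_data:
        #             np.save('%s/lidar/%06d.npy' % (args.data_dir, point_cloud.frame), points)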

        

        # --------------
        # Autopilot for the ego vehicles (disabled here)
        # --------------
        for i in range(num_vehicles):
            ego_vehicles[i].set_autopilot(False) 
        
        # --------------
        # Dummy Actor for spectator
        # --------------
        dummy_bp = world.get_blueprint_library().find('sensor.camera.rgb')
        dummy_transform = carla.Transform(carla.Location(x=-6, z=4), carla.Rotation(pitch=10.0))
        dummy = world.spawn_actor(dummy_bp, dummy_transform, attach_to=ego_vehicles[0], attachment_type=carla.AttachmentType.SpringArm)
        dummy.listen(lambda image: dummy_function(image))
        
        spectator = world.get_spectator()
        spectator.set_transform(dummy.get_transform())



        gt_array = []

        # --------------
        # Game loop. Prevents the script from finishing.
        # --------------
        count = -1
        while True:
            # This is for async mode
            # world_snapshot = world.wait_for_tick() 
            
            # In synchronous mode, the client ticks the world
            world.tick()
            
            count += 1
            if count == 0:
                start_tf = ego_vehicles[0].get_transform()
                start_tf.rotation.yaw -= 3
                print('Start TF: ', start_tf)
                # T_start = np.array(start_tf.get_inverse_matrix()) Can be used?
                # print(start_tf.transform(start_tf.location))
                bias_tf = start_tf.transform(ego_vehicles[0].get_transform().location)
                for i in range(num_vehicles):
                    print('Ego Vehicle ID is: ', ego_vehicles[i].id)
                input('\nPress Enter to Continue:')

            # print(start_tf.transform(ego_vehicle.get_transform().location))
            spectator.set_transform(dummy.get_transform())

            if args.save_gt:
                vehicle_tf =  ego_vehicles[0].get_transform()
                vehicle_tf_odom = start_tf.transform(vehicle_tf.location) - bias_tf
                vehicle_tf_odom = np.array([vehicle_tf_odom.x, vehicle_tf_odom.y, vehicle_tf_odom.z])
                gt_array.append(vehicle_tf_odom)
            
    except Exception as e:
        print(e)

    finally:

        if args.save_gt:
            gt_array = np.array(gt_array)
            np.savetxt( args.data_dir + '/gt.csv', gt_array, delimiter=',')

        # --------------
        # Stop recording and destroy actors
        # --------------
        client.stop_recorder()
        if ego_vehicles is not None:
            if ego_cam is not None:
                ego_cam.stop()
                ego_cam.destroy()
            if ego_col is not None:
                ego_col.stop()
                ego_col.destroy()
            if ego_lane is not None:
                ego_lane.stop()
                ego_lane.destroy()
            if ego_obs is not None:
                ego_obs.stop()
                ego_obs.destroy()
            if ego_gnss is not None:
                ego_gnss.stop()
                ego_gnss.destroy()
            if ego_imu is not None:
                ego_imu.stop()
                ego_imu.destroy()
            if lidar_sen is not None:
                lidar_sen.stop()
                lidar_sen.destroy()
            if dummy is not None:
                dummy.stop()
                dummy.destroy()
            for i in range(num_vehicles):
                ego_vehicles[i].destroy()
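        # (Note, not in the original) the world is left in synchronous mode on exit; the
        # cleanup could also restore asynchronous settings so later clients are not stuck
        # waiting for ticks, e.g.:
        #     settings = world.get_settings()
        #     settings.synchronous_mode = False
        #     settings.fixed_delta_seconds = None
        #     world.apply_settings(settings)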
Example #2
    def main_test(self):
        restart_time = time.time()
        PATH = "/home/a/RL_decision/trained_info.tar"
        print(torch.cuda.get_device_name())
        clock = pygame.time.Clock()
        Keyboardcontrol = KeyboardControl(self, False)
        display = pygame.display.set_mode((self.width, self.height),
                                          pygame.HWSURFACE | pygame.DOUBLEBUF)

        self.lane_start_point = [
            carla.Location(x=14.905815, y=-135.747452, z=0.000000),
            carla.Location(x=172.745468, y=-364.531799, z=0.000000),
            carla.Location(x=382.441040, y=-212.488907, z=0.000000)
        ]
        self.lane_finished_point = [
            carla.Location(x=14.631096, y=-205.746918, z=0.000000),
            carla.Location(x=232.962860, y=-364.149139, z=0.000000),
            carla.Location(x=376.542816, y=-10.352980, z=0.000000)
        ]
        self.lane_change_point = [
            carla.Location(x=14.905815, y=-135.747452, z=0.000000),
            carla.Location(x=14.631096, y=-205.746918, z=0.000000),
            carla.Location(x=172.745468, y=-364.531799, z=0.000000),
            carla.Location(x=232.962860, y=-364.149139, z=0.000000),
            carla.Location(x=382.441040, y=-212.488907, z=0.000000),
            carla.Location(x=376.542816, y=-10.352980, z=0.000000)
        ]

        self.world.debug.draw_string(carla.Location(x=14.905815,
                                                    y=-135.747452,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        self.world.debug.draw_string(carla.Location(x=14.631096,
                                                    y=-205.746918,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        # ---------------------------#

        self.world.debug.draw_string(carla.Location(x=172.745468,
                                                    y=-364.531799,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        self.world.debug.draw_string(carla.Location(x=232.962860,
                                                    y=-364.149139,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        # ---------------------------#
        self.world.debug.draw_string(carla.Location(x=382.441040,
                                                    y=-212.488907,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        self.world.debug.draw_string(carla.Location(x=376.542816,
                                                    y=-10.352980,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)

        lane_distance_between_points = []
        for i in range(len(self.lane_finished_point)):
            lane_distance_between_points.append(
                ((self.lane_start_point[i].x - self.lane_finished_point[i].x)**
                 2 + (self.lane_start_point[i].y -
                      self.lane_finished_point[i].y)**2 +
                 (self.lane_start_point[i].z - self.lane_finished_point[i].z)**
                 2)**0.5)

        pre_decision = None
        while True:
            # if time.time()-restart_time > 10:
            #     print("restart")
            #     self.restart()
            if Keyboardcontrol.parse_events(client, self, clock):
                return
            self.spectator.set_transform(
                carla.Transform(
                    self.player.get_transform().location +
                    carla.Location(z=50), carla.Rotation(pitch=-90)))
            self.camera_rgb.render(display)
            self.hud.render(display)
            pygame.display.flip()

            ## Get max lane ##
            # print("start get lane")
            if self.section < len(lane_distance_between_points):

                self.lane_distance_between_start = (
                    (self.player.get_transform().location.x -
                     self.lane_start_point[self.section].x)**2 +
                    (self.player.get_transform().location.y -
                     self.lane_start_point[self.section].y)**2)**0.5
                self.lane_distance_between_end = (
                    (self.player.get_transform().location.x -
                     self.lane_finished_point[self.section].x)**2 +
                    (self.player.get_transform().location.y -
                     self.lane_finished_point[self.section].y)**2)**0.5

                # print("self.lane_distance_between_start : ",self.lane_distance_between_start,"self.lane_distance_between_end :",self.lane_distance_between_end, "lane_distance_between_points[section]",lane_distance_between_points[self.section],"section :", self.section)
                if max(lane_distance_between_points[self.section], self.lane_distance_between_start, self.lane_distance_between_end) == \
                        lane_distance_between_points[self.section]:
                    self.max_Lane_num = 3
                    # world.debug.draw_string(self.player.get_transform().location, 'o', draw_shadow = True,
                    #                                 color = carla.Color(r=255, g=255, b=0), life_time = 999)

                elif max(lane_distance_between_points[self.section], self.lane_distance_between_start, self.lane_distance_between_end) == \
                        self.lane_distance_between_start and self.max_Lane_num ==3:
                    self.section += 1
                    self.max_Lane_num = 4
                    if self.section >= len(lane_distance_between_points):  # wrap around after the last section
                        self.section = 0
            ## finished get max lane ##
            # print("finished get lane")

            self.search_distance_valid()
            # print("distance :" ,,"max_lane_num : ", self.max_Lane_num)

            if self.max_Lane_num == 3:
                self.world.debug.draw_string(
                    self.player.get_transform().location,
                    'o',
                    draw_shadow=True,
                    color=carla.Color(r=255, g=255, b=0),
                    life_time=9999)
            if time.time() - self.lane_change_time > 10:
                self.lane_change_time = time.time()
                if pre_decision is None:
                    self.decision = 1
                    self.ego_Lane += 1
                    pre_decision = 1

                elif pre_decision == 1:
                    pre_decision = -1
                    self.ego_Lane += -1
                    self.decision = -1

                elif pre_decision == -1:
                    self.decision = 1
                    self.ego_Lane += 1
                    pre_decision = 1

            else:
                self.decision = 0

            self.controller.apply_control(self.decision)
            # self.world.wait_for_tick(10.0)
            clock.tick(30)

            self.hud.tick(self, clock)
Example #3
    def setup_sensors(self, vehicle, debug_mode=False):
        """
        Create the sensors defined by the user and attach them to the ego-vehicle
        :param vehicle: ego vehicle
        :return:
        """
        bp_library = CarlaDataProvider.get_world().get_blueprint_library()
        for sensor_spec in self._agent.sensors():
            # These are the pseudosensors (not spawned)
            if sensor_spec['type'].startswith('sensor.opendrive_map'):
                # The HDMap pseudo sensor is created directly here
                sensor = OpenDriveMapReader(vehicle, sensor_spec['reading_frequency'])
            elif sensor_spec['type'].startswith('sensor.speedometer'):
                delta_time = CarlaDataProvider.get_world().get_settings().fixed_delta_seconds
                frame_rate = 1 / delta_time
                sensor = SpeedometerReader(vehicle, frame_rate)
            # These are the sensors spawned on the carla world
            else:
                bp = bp_library.find(str(sensor_spec['type']))
                if sensor_spec['type'].startswith('sensor.camera'):
                    bp.set_attribute('image_size_x', str(sensor_spec['width']))
                    bp.set_attribute('image_size_y', str(sensor_spec['height']))
                    bp.set_attribute('fov', str(sensor_spec['fov']))
                    bp.set_attribute('lens_circle_multiplier', str(3.0))
                    bp.set_attribute('lens_circle_falloff', str(3.0))
                    bp.set_attribute('chromatic_aberration_intensity', str(0.5))
                    bp.set_attribute('chromatic_aberration_offset', str(0))
                    # Change __call__ in autonomous agent if you change sensor_tick.
                    bp.set_attribute('sensor_tick', '0.05')

                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                                     roll=sensor_spec['roll'],
                                                     yaw=sensor_spec['yaw'])
                elif sensor_spec['type'].startswith('sensor.lidar'):
                    bp.set_attribute('range', str(85))
                    bp.set_attribute('rotation_frequency', str(20))
                    bp.set_attribute('channels', str(64))
                    bp.set_attribute('upper_fov', str(10))
                    bp.set_attribute('lower_fov', str(-30))
                    bp.set_attribute('points_per_second', str(600000))
                    # bp.set_attribute('atmosphere_attenuation_rate', str(0.004))
                    # bp.set_attribute('dropoff_general_rate', str(0.45))
                    # bp.set_attribute('dropoff_intensity_limit', str(0.8))
                    # bp.set_attribute('dropoff_zero_intensity', str(0.4))
                    bp.set_attribute('sensor_tick', '0.05')
                    sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                                     roll=sensor_spec['roll'],
                                                     yaw=sensor_spec['yaw'])
                elif sensor_spec['type'].startswith('sensor.other.radar'):
                    bp.set_attribute('horizontal_fov', str(sensor_spec['fov']))  # degrees
                    bp.set_attribute('vertical_fov', str(sensor_spec['fov']))  # degrees
                    bp.set_attribute('points_per_second', '1500')
                    bp.set_attribute('range', '100')  # meters

                    sensor_location = carla.Location(x=sensor_spec['x'],
                                                     y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                                     roll=sensor_spec['roll'],
                                                     yaw=sensor_spec['yaw'])

                elif sensor_spec['type'].startswith('sensor.other.gnss'):
                    bp.set_attribute('noise_alt_stddev', str(0.000005))
                    bp.set_attribute('noise_lat_stddev', str(0.000005))
                    bp.set_attribute('noise_lon_stddev', str(0.000005))
                    bp.set_attribute('noise_alt_bias', str(0.0))
                    bp.set_attribute('noise_lat_bias', str(0.0))
                    bp.set_attribute('noise_lon_bias', str(0.0))

                    sensor_location = carla.Location(x=sensor_spec['x'],
                                                     y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation()

                elif sensor_spec['type'].startswith('sensor.other.imu'):
                    bp.set_attribute('noise_accel_stddev_x', str(0.001))
                    bp.set_attribute('noise_accel_stddev_y', str(0.001))
                    bp.set_attribute('noise_accel_stddev_z', str(0.015))
                    bp.set_attribute('noise_gyro_stddev_x', str(0.001))
                    bp.set_attribute('noise_gyro_stddev_y', str(0.001))
                    bp.set_attribute('noise_gyro_stddev_z', str(0.001))

                    sensor_location = carla.Location(x=sensor_spec['x'],
                                                     y=sensor_spec['y'],
                                                     z=sensor_spec['z'])
                    sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                                     roll=sensor_spec['roll'],
                                                     yaw=sensor_spec['yaw'])
                # create sensor
                sensor_transform = carla.Transform(sensor_location, sensor_rotation)
                sensor = CarlaDataProvider.get_world().spawn_actor(bp, sensor_transform, vehicle)
            # setup callback
            sensor.listen(CallBack(sensor_spec['id'], sensor_spec['type'], sensor, self._agent.sensor_interface))
            self._sensors_list.append(sensor)

        # Tick once to spawn the sensors
        CarlaDataProvider.get_world().tick()
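        # (Sketch, not part of the original) each entry of self._agent.sensors() is expected
        # to be a dict; the keys below are inferred from how this method reads them, and the
        # ids are placeholders:
        #     {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.6,
        #      'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
        #      'width': 800, 'height': 600, 'fov': 100, 'id': 'front_camera'}
        #     {'type': 'sensor.opendrive_map', 'reading_frequency': 1, 'id': 'hd_map'}
        #     {'type': 'sensor.speedometer', 'id': 'speed'}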
Example #4
    def __init__(self, parent_actor, width, height, gap_x, diplace_y):
        self.save_counter = 0
        self.width = width
        self.height = height
        self.sensor = None
        self._surface = None
        self._parent = parent_actor
        # self._hud = hud
        self._recording = False
        self.timedata = time.strftime("%Y-%d-%I-%M-%S",
                                      time.localtime(time.time()))
        timedata = self.timedata
        self.dirpath = "datacollected_{}".format(timedata)
        self.cur_dir = os.path.dirname(os.path.realpath(__file__))
        self.save_dir = self.cur_dir + "/" + self.dirpath
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        self._camera_transforms = [
            carla.Transform(carla.Location(x=0, z=2.8)),
            carla.Transform(carla.Location(x=0, z=2.8),
                            carla.Rotation(yaw=90)),
            carla.Transform(carla.Location(x=0, z=2.8),
                            carla.Rotation(yaw=180)),
            carla.Transform(carla.Location(x=0, z=2.8),
                            carla.Rotation(yaw=270))
        ]
        self._transform_index = 0
        self._sensors = [
            # ['sensor.camera.rgb', cc.Raw, 'Camera RGB'],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)']
            # ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],
            # ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)'],
            # ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)'],
            # ['sensor.camera.semantic_segmentation', cc.CityScapesPalette, 'Camera Semantic Segmentation (CityScapes Palette)'],
            # ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']
        ]
        self.fov = 90.0
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        print("setting image")
        for item in self._sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(self.width))
                bp.set_attribute('image_size_y', str(self.height))
                bp.set_attribute('fov', str(self.fov))
                bp.set_attribute('image_size_x', '500')  # note: overrides the width/height set above
                bp.set_attribute('image_size_y', '420')
                print("set image six")
            elif item[0].startswith('sensor.lidar'):
                bp.set_attribute('range', '5000')
                # bp.set_attribute('rotation_frequency', '20')
                bp.set_attribute('upper_fov', '15')
                bp.set_attribute('lower_fov', '-25')
                bp.set_attribute('channels', '64')
                bp.set_attribute('points_per_second', '1000000')
            item.append(bp)

        self.surface_shift_x = gap_x
        self.surface_shift_y = diplace_y
        self.lidar_points = None
        self.depth_image = None
Example #5
    def convert_position_to_transform(position, actor_list=None):
        """
        Convert an OpenScenario position into a CARLA transform

        Not supported: Road, RelativeRoad, Lane, RelativeLane as the PythonAPI currently
                       does not provide sufficient access to OpenDrive information
                       Also not supported is Route. This can be added by checking additional
                       route information
        """

        if position.find('World') is not None:
            world_pos = position.find('World')
            x = float(world_pos.attrib.get('x', 0))
            y = float(world_pos.attrib.get('y', 0))
            z = float(world_pos.attrib.get('z', 0))
            yaw = math.degrees(float(world_pos.attrib.get('h', 0)))
            pitch = math.degrees(float(world_pos.attrib.get('p', 0)))
            roll = math.degrees(float(world_pos.attrib.get('r', 0)))
            if not OpenScenarioParser.use_carla_coordinate_system:
                y = y * (-1.0)
                yaw = yaw * (-1.0)
            return carla.Transform(
                carla.Location(x=x, y=y, z=z),
                carla.Rotation(yaw=yaw, pitch=pitch, roll=roll))

        elif ((position.find('RelativeWorld') is not None)
              or (position.find('RelativeObject') is not None)
              or (position.find('RelativeLane') is not None)):
            rel_pos = position.find('RelativeWorld') or position.find(
                'RelativeObject') or position.find('RelativeLane')

            # get relative object and relative position
            obj = rel_pos.attrib.get('object')
            obj_actor = None
            actor_transform = None

            if actor_list is not None:
                for actor in actor_list:
                    if actor.rolename == obj:
                        obj_actor = actor
                        actor_transform = actor.transform
            else:
                for actor in CarlaDataProvider.get_world().get_actors():
                    if 'role_name' in actor.attributes and actor.attributes[
                            'role_name'] == obj:
                        obj_actor = actor
                        actor_transform = obj_actor.get_transform()
                        break

            if obj_actor is None:
                raise AttributeError(
                    "Object '{}' provided as position reference is not known".
                    format(obj))

            # calculate orientation h, p, r
            is_absolute = False
            dyaw = dpitch = droll = 0.0
            if rel_pos.find('Orientation') is not None:
                orientation = rel_pos.find('Orientation')
                is_absolute = (orientation.attrib.get('type') == "absolute")
                dyaw = math.degrees(float(orientation.attrib.get('h', 0)))
                dpitch = math.degrees(float(orientation.attrib.get('p', 0)))
                droll = math.degrees(float(orientation.attrib.get('r', 0)))

            if not OpenScenarioParser.use_carla_coordinate_system:
                dyaw = dyaw * (-1.0)

            yaw = actor_transform.rotation.yaw
            pitch = actor_transform.rotation.pitch
            roll = actor_transform.rotation.roll

            if not is_absolute:
                yaw = yaw + dyaw
                pitch = pitch + dpitch
                roll = roll + droll
            else:
                yaw = dyaw
                pitch = dpitch
                roll = droll

            # calculate location x, y, z
            # dx, dy, dz
            if (position.find('RelativeWorld')
                    is not None) or (position.find('RelativeObject')
                                     is not None):
                dx = float(rel_pos.attrib.get('dx', 0))
                dy = float(rel_pos.attrib.get('dy', 0))
                dz = float(rel_pos.attrib.get('dz', 0))

                if not OpenScenarioParser.use_carla_coordinate_system:
                    dy = dy * (-1.0)

                x = actor_transform.location.x + dx
                y = actor_transform.location.y + dy
                z = actor_transform.location.z + dz

            # dLane, ds, offset
            elif position.find('RelativeLane') is not None:
                dlane = float(rel_pos.attrib.get('dLane'))
                ds = float(rel_pos.attrib.get('ds'))
                offset = float(rel_pos.attrib.get('offset'))

                carla_map = CarlaDataProvider.get_map()
                relative_waypoint = carla_map.get_waypoint(
                    actor_transform.location)

                if dlane == 0:
                    wp = relative_waypoint
                elif dlane == -1:
                    wp = relative_waypoint.get_left_lane()
                elif dlane == 1:
                    wp = relative_waypoint.get_right_lane()
                else:
                    wp = None
                if wp is None:
                    raise AttributeError(
                        "Object '{}' position with dLane={} is not valid".
                        format(obj, dlane))

                wp = wp.next(ds)[-1]

                # offset
                # lateral shift from the lane centre point
                h = math.radians(wp.transform.rotation.yaw)
                x_offset = math.sin(h) * offset
                y_offset = math.cos(h) * offset

                if OpenScenarioParser.use_carla_coordinate_system:
                    x_offset = x_offset * (-1.0)
                    y_offset = y_offset * (-1.0)

                x = wp.transform.location.x + x_offset
                y = wp.transform.location.y + y_offset
                z = wp.transform.location.z

            return carla.Transform(
                carla.Location(x=x, y=y, z=z),
                carla.Rotation(yaw=yaw, pitch=pitch, roll=roll))

        # Not implemented
        elif position.find('Road') is not None:
            raise NotImplementedError("Road positions are not yet supported")
        elif position.find('RelativeRoad') is not None:
            raise NotImplementedError(
                "RelativeRoad positions are not yet supported")
        elif position.find('Lane') is not None:
            raise NotImplementedError("Lane positions are not yet supported")
        elif position.find('Route') is not None:
            raise NotImplementedError("Route positions are not yet supported")
        else:
            raise AttributeError("Unknown position")
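    # (Sketch, not part of the original) example usage with a parsed OpenSCENARIO
    # <Position> element built with xml.etree.ElementTree, assuming this function is
    # the OpenScenarioParser static method that the references above suggest:
    #     import xml.etree.ElementTree as ET
    #     position = ET.fromstring(
    #         '<Position><World x="10.0" y="5.0" z="0.0" h="1.5708" p="0" r="0"/></Position>')
    #     transform = OpenScenarioParser.convert_position_to_transform(position)
    #     # with use_carla_coordinate_system disabled this yields a transform at
    #     # (10.0, -5.0, 0.0) with yaw ~= -90 degrees (y and heading are mirrored)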
Example #6
def ndarray_to_rotation(array: np.ndarray) -> carla.Rotation:  # pylint: disable=no-member
    """Converts neural network friendly tensor back to `carla.Rotation`."""
    return carla.Rotation(*list(map(float, array)))  # pylint: disable=no-member
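# (Sketch, not part of the original) the array is unpacked positionally, i.e. in
# carla.Rotation(pitch, yaw, roll) order:
#     ndarray_to_rotation(np.array([0.0, 90.0, 0.0]))  # pitch=0, yaw=90, roll=0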
Example #7
def RPY_to_carla_rotation(roll, pitch, yaw):
    return carla.Rotation(roll=math.degrees(roll),
                          pitch=-math.degrees(pitch),
                          yaw=-math.degrees(yaw))
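# (Sketch, not part of the original) converts right-handed roll/pitch/yaw given in
# radians (e.g. from an IMU) to CARLA's left-handed, degree-based rotation:
#     RPY_to_carla_rotation(math.radians(10), math.radians(5), math.radians(90))
#     # -> carla.Rotation(pitch=-5.0, yaw=-90.0, roll=10.0)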
Example #8
client = carla.Client("localhost", 2000)
client.set_timeout(10.0)
world = client.load_world('Town06')

# set the weather
world = client.get_world()
weather = carla.WeatherParameters(cloudiness=10.0,
                                  precipitation=0.0,
                                  sun_altitude_angle=90.0)
world.set_weather(weather)

# set the spectator position for demo purpose
spectator = world.get_spectator()
spectator.set_transform(
    carla.Transform(carla.Location(x=-68.29, y=151.75, z=170.8),
                    carla.Rotation(pitch=-31.07, yaw=-90.868,
                                   roll=1.595)))  # plain ground

env = CARLA_ENV(world)
time.sleep(2)  # sleep for 2 seconds, wait the initialization to finish

# spawn a vehicle, here I choose a Tesla model
spawn_point = carla.Transform(
    carla.Location(x=-277.08, y=-15.39, z=4.94),
    carla.Rotation(pitch=0.000000, yaw=0, roll=0.000000))
model_name = "vehicle.tesla.model3"
model_uniquename = env.spawn_vehicle(
    model_name, spawn_point
)  # spawn the model and get its unique name; CARLA_ENV keeps the vehicle in its actor list

time.sleep(5)
Example #9
# ======================================================================================

try:
    client = carla.Client("localhost", 2000)
    client.set_timeout(5.0)

    world = client.get_world()

    blueprint_library = world.get_blueprint_library()

    bp = blueprint_library.filter("model3")[0]
    print(bp)

    # spawn_point = random.choice(world.get_map().get_spawn_points())
    vehicle_spawn_point = carla.Transform(carla.Location(x=50, y=5, z=1),
                                          carla.Rotation(yaw=0.0))
    # spawn_point = carla.Transform(carla.Location(x=233, y=-12, z=1), carla.Rotation(yaw=-90))

    vehicle = world.spawn_actor(bp, vehicle_spawn_point)
    actor_list.append(vehicle)
    vehicle.set_autopilot(True)

    # vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=0.0))

    cam_bp = blueprint_library.find("sensor.camera.rgb")
    cam_bp.set_attribute("image_size_x", format(IM_WIDTH))
    cam_bp.set_attribute("image_size_y", format(IM_HEIGHT))
    cam_bp.set_attribute("fov", "90")

    fcam_spawn_point = carla.Transform(carla.Location(x=0.9, z=1.5),
                                       carla.Rotation(yaw=0.0))
Example #10
def scenario(scenario_params, ctrl_params, carla_synchronous, eval_env,
             eval_stg):
    ego_vehicle = None
    agent = None
    spectator_camera = None
    other_vehicles = []
    record_spectator = False
    client, world, carla_map, traffic_manager = carla_synchronous

    try:
        map_reader = NaiveMapQuerier(world, carla_map, debug=True)
        online_config = OnlineConfig(record_interval=10,
                                     node_type=eval_env.NodeType)

        # Mock vehicles
        spawn_points = carla_map.get_spawn_points()
        blueprints = get_all_vehicle_blueprints(world)
        spawn_indices = [scenario_params.ego_spawn_idx] + scenario_params.other_spawn_ids
        other_vehicle_ids = []
        for k, spawn_idx in enumerate(spawn_indices):
            if k == 0:
                blueprint = world.get_blueprint_library().find(
                    'vehicle.audi.a2')
            else:
                blueprint = np.random.choice(blueprints)
            spawn_point = spawn_points[spawn_idx]
            spawn_point = shift_spawn_point(carla_map, k,
                                            scenario_params.spawn_shifts,
                                            spawn_point)
            # Prevent collision with road.
            spawn_point.location += carla.Location(0, 0, 0.5)
            vehicle = world.spawn_actor(blueprint, spawn_point)
            if k == 0:
                ego_vehicle = vehicle
            else:
                vehicle.set_autopilot(True, traffic_manager.get_port())
                if scenario_params.ignore_signs:
                    traffic_manager.ignore_signs_percentage(vehicle, 100.)
                if scenario_params.ignore_lights:
                    traffic_manager.ignore_lights_percentage(vehicle, 100.)
                if scenario_params.ignore_vehicles:
                    traffic_manager.ignore_vehicles_percentage(vehicle, 100.)
                if not scenario_params.auto_lane_change:
                    traffic_manager.auto_lane_change(vehicle, False)
                other_vehicles.append(vehicle)
                other_vehicle_ids.append(vehicle.id)

        frame = world.tick()
        agent = MidlevelAgent(
            ego_vehicle,
            map_reader,
            other_vehicle_ids,
            eval_stg,
            n_burn_interval=scenario_params.n_burn_interval,
            control_horizon=ctrl_params.control_horizon,
            prediction_horizon=ctrl_params.prediction_horizon,
            step_horizon=ctrl_params.step_horizon,
            n_predictions=ctrl_params.n_predictions,
            scene_builder_cls=TrajectronPlusPlusSceneBuilder,
            turn_choices=scenario_params.turn_choices,
            max_distance=scenario_params.max_distance,
            plot_boundary=False,
            log_agent=False,
            log_cplex=False,
            plot_scenario=True,
            plot_simulation=True,
            plot_overapprox=False,
            scene_config=online_config)
        agent.start_sensor()
        assert agent.sensor_is_listening
        """Move the spectator to the ego vehicle.
        The positioning is a little off"""
        state = agent.get_vehicle_state()
        goal = agent.get_goal()
        goal_x, goal_y = goal.x, -goal.y
        if goal.is_relative:
            location = carla.Location(x=state[0] + goal_x / 2.,
                                      y=state[1] - goal_y / 2.,
                                      z=state[2] + 50)
        else:
            location = carla.Location(x=(state[0] + goal_x) / 2.,
                                      y=(state[1] + goal_y) / 2.,
                                      z=state[2] + 50)
        # configure the spectator
        world.get_spectator().set_transform(
            carla.Transform(location,
                            carla.Rotation(pitch=-70, yaw=-90, roll=20)))
        record_spectator = False
        if record_spectator:
            # attach camera to spectator
            spectator_camera = attach_camera_to_spectator(world, frame)

        n_burn_frames = scenario_params.n_burn_interval * online_config.record_interval
        if ctrl_params.loop_type == LoopEnum.CLOSED_LOOP:
            run_frames = scenario_params.run_interval * online_config.record_interval
        else:
            run_frames = ctrl_params.control_horizon * online_config.record_interval - 1
        for idx in range(n_burn_frames + run_frames):
            control = None
            for ctrl in scenario_params.controls:
                if ctrl.interval[0] <= idx <= ctrl.interval[1]:
                    control = ctrl.control
                    break
            frame = world.tick()
            agent.run_step(frame, control=control)

    finally:
        if spectator_camera:
            spectator_camera.destroy()
        if agent:
            agent.destroy()
        if ego_vehicle:
            ego_vehicle.destroy()
        for other_vehicle in other_vehicles:
            other_vehicle.destroy()

        if record_spectator:
            time.sleep(5)
        else:
            time.sleep(1)
Example #11
    def reset(self):
        self.actor_list = []
        self.collision_hist = []

        self.depth = 50

        self.Obdist = 20
        self.ObdistL = 15
        self.ObdistR = 15

        self.azimuth = 0
        self.zrate = 0

        self.spawn_point = (self.world.get_map().get_spawn_points())[5]

        self.vehicle = self.world.spawn_actor(self.bp, self.spawn_point)
        self.vehicle.apply_control(
            carla.VehicleControl(throttle=0.0, steer=0.0))
        time.sleep(1)
        #vehicle.set_autopilot(True)  # if you just wanted some NPCs to drive.

        self.actor_list.append(self.vehicle)

        # https://carla.readthedocs.io/en/latest/cameras_and_sensors
        # get the blueprint for this sensor
        self.blueprint = self.blueprint_library.find('sensor.other.radar')

        self.blueprintOD = self.blueprint_library.find('sensor.other.obstacle')
        self.blueprintODside = self.blueprint_library.find(
            'sensor.other.obstacle')

        self.blueprintCD = self.blueprint_library.find(
            'sensor.other.collision')
        self.blueprintRGB = self.blueprint_library.find('sensor.camera.rgb')
        self.blueprintIMU = self.blueprint_library.find('sensor.other.imu')

        # change the dimensions of the image
        self.blueprintRGB.set_attribute('image_size_x', f'{self.im_width}')
        self.blueprintRGB.set_attribute('image_size_y', f'{self.im_height}')
        self.blueprintRGB.set_attribute('fov', '110')

        self.blueprint.set_attribute('horizontal_fov', '15')
        self.blueprint.set_attribute('vertical_fov', '5')
        self.blueprint.set_attribute('range', '15')

        self.blueprintOD.set_attribute('distance', '40')
        self.blueprintOD.set_attribute('debug_linetrace', 'False')
        self.blueprintOD.set_attribute('hit_radius', '2.5')

        self.blueprintODside.set_attribute('distance', '25')
        self.blueprintODside.set_attribute('debug_linetrace', 'False')
        self.blueprintODside.set_attribute('hit_radius', '2')

        # Adjust sensor relative to vehicle
        spawn_point = carla.Transform(carla.Location(x=2.5, z=0.7))
        spawn_pointD = carla.Transform(carla.Location(x=2.5, z=4))
        spawn_pointL = carla.Transform(carla.Location(x=2.5, z=3),
                                       carla.Rotation(0, -50, 0))
        spawn_pointR = carla.Transform(carla.Location(x=2.5, z=3),
                                       carla.Rotation(0, 50, 0))

        spawn_pointcam = carla.Transform(carla.Location(x=0, z=2))

        # spawn the sensor and attach to vehicle.
        #self.sensor = self.world.spawn_actor(self.blueprint, spawn_point, attach_to=self.vehicle)

        self.sensorOD = self.world.spawn_actor(self.blueprintOD,
                                               spawn_pointD,
                                               attach_to=self.vehicle)
        self.sensorODR = self.world.spawn_actor(self.blueprintODside,
                                                spawn_pointR,
                                                attach_to=self.vehicle)
        self.sensorODL = self.world.spawn_actor(self.blueprintODside,
                                                spawn_pointL,
                                                attach_to=self.vehicle)

        self.sensorCD = self.world.spawn_actor(self.blueprintCD,
                                               spawn_point,
                                               attach_to=self.vehicle)
        self.sensorRGB = self.world.spawn_actor(self.blueprintRGB,
                                                spawn_pointcam,
                                                attach_to=self.vehicle)
        self.sensorIMU = self.world.spawn_actor(self.blueprintIMU,
                                                spawn_point,
                                                attach_to=self.vehicle)

        # add sensor to list of actors
        #self.actor_list.append(self.sensor)
        self.actor_list.append(self.sensorOD)
        self.actor_list.append(self.sensorODL)
        self.actor_list.append(self.sensorODR)
        self.actor_list.append(self.sensorCD)
        self.actor_list.append(self.sensorRGB)
        self.actor_list.append(self.sensorIMU)

        # do something with this sensor

        #self.sensor.listen(lambda data: self.process_data(data))

        self.sensorOD.listen(lambda dataOD: self.register_obstacle(dataOD))
        self.sensorODL.listen(lambda dataODL: self.register_obstacleL(dataODL))
        self.sensorODR.listen(lambda dataODR: self.register_obstacleR(dataODR))

        self.sensorCD.listen(lambda dataCD: self.register_collision(dataCD))

        #self.sensorRGB.listen(lambda dataRGB: self.process_img(dataRGB))

        self.sensorIMU.listen(lambda dataIMU: self.process_IMU(dataIMU))
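        # (Sketch, not part of the original) reset() relies on callbacks defined elsewhere on
        # this class; a minimal register_collision consistent with the self.collision_hist
        # list initialised above might look like:
        #     def register_collision(self, event):
        #         # event is a carla.CollisionEvent from sensor.other.collision
        #         self.collision_hist.append(event)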
Example #12
class NoSignalJunctionCrossing(BasicScenario):
    """
    Implementation class for
    'Non-signalized junctions: crossing negotiation' scenario,
    Traffic Scenario 10.

    Location    :   Town03
    """

    # ego vehicle parameters
    _ego_vehicle_model = 'vehicle.lincoln.mkz2017'
    _ego_vehicle_start = carla.Transform(
        carla.Location(x=-74.32, y=-50, z=0.5), carla.Rotation(yaw=270))
    _ego_vehicle_max_velocity = 20
    _ego_vehicle_driven_distance = 105

    # other vehicle
    _other_vehicle_model = 'vehicle.tesla.model3'
    _other_vehicle_start = carla.Transform(
        carla.Location(x=-105, y=-136, z=0.5), carla.Rotation(yaw=0))
    _other_vehicle_max_brake = 1.0
    _other_vehicle_target_velocity = 15

    def __init__(self, world, debug_mode=False):
        """
        Set up all relevant parameters, create the scenario
        and instantiate the scenario manager
        """
        self.other_vehicles = [
            setup_vehicle(world, self._other_vehicle_model,
                          self._other_vehicle_start)
        ]
        self.ego_vehicle = setup_vehicle(world,
                                         self._ego_vehicle_model,
                                         self._ego_vehicle_start,
                                         hero=True)
        super(NoSignalJunctionCrossing,
              self).__init__(name="NoSignalJunctionCrossing",
                             town="Town03",
                             world=world,
                             debug_mode=debug_mode)

    def _create_behavior(self):
        """
        After invoking this scenario, it will wait for the user-controlled
        vehicle to enter the start region, then make a traffic participant
        accelerate until it is going fast enough to reach the intersection
        point at the same time as the user-controlled vehicle.
        Once the user-controlled vehicle comes close to the junction,
        the traffic participant accelerates and passes through the junction.
        After 60 seconds, a timeout stops the scenario.
        """

        # Creating leaf nodes
        start_other_trigger = InTriggerRegion(self.ego_vehicle, -80, -70, -75,
                                              -60)

        sync_arrival = SyncArrival(self.other_vehicles[0], self.ego_vehicle,
                                   carla.Location(x=-74.63, y=-136.34))

        pass_through_trigger = InTriggerRegion(self.ego_vehicle, -90, -70,
                                               -124, -119)

        keep_velocity_other = KeepVelocity(self.other_vehicles[0],
                                           self._other_vehicle_target_velocity)

        stop_other_trigger = InTriggerRegion(self.other_vehicles[0], -45, -35,
                                             -140, -130)

        stop_other = StopVehicle(self.other_vehicles[0],
                                 self._other_vehicle_max_brake)

        end_condition = InTriggerRegion(self.ego_vehicle, -90, -70, -170, -156)

        # Creating non-leaf nodes
        root = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        scenario_sequence = py_trees.composites.Sequence()
        sync_arrival_parallel = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
        keep_velocity_other_parallel = py_trees.composites.Parallel(
            policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)

        # Building tree
        root.add_child(scenario_sequence)
        scenario_sequence.add_child(start_other_trigger)
        scenario_sequence.add_child(sync_arrival_parallel)
        scenario_sequence.add_child(keep_velocity_other_parallel)
        scenario_sequence.add_child(stop_other)
        scenario_sequence.add_child(end_condition)
        sync_arrival_parallel.add_child(sync_arrival)
        sync_arrival_parallel.add_child(pass_through_trigger)
        keep_velocity_other_parallel.add_child(keep_velocity_other)
        keep_velocity_other_parallel.add_child(stop_other_trigger)

        return root

    def _create_test_criteria(self):
        """
        A list of all test criteria is created; it is later used
        in a parallel behavior tree.
        """
        criteria = []

        # Adding checks for ego vehicle
        collision_criterion_ego = CollisionTest(self.ego_vehicle)
        driven_distance_criterion = DrivenDistanceTest(
            self.ego_vehicle, self._ego_vehicle_driven_distance)
        criteria.append(collision_criterion_ego)
        criteria.append(driven_distance_criterion)

        # Add appropriate checks for other vehicles
        for vehicle in self.other_vehicles:
            collision_criterion = CollisionTest(vehicle)
            criteria.append(collision_criterion)

        return criteria
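# (Sketch, not part of the original) instantiating the scenario requires a client
# connected to a server running Town03, matching the hard-coded transforms above:
#     client = carla.Client('localhost', 2000)
#     client.set_timeout(10.0)
#     world = client.load_world('Town03')
#     scenario = NoSignalJunctionCrossing(world, debug_mode=False)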
Example #13
def main():
    actor_list = []
    pygame.init()

    display = pygame.display.set_mode((800, 600),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(2.0)

    world = client.get_world()

    try:
        m = world.get_map()
        start_pose = random.choice(m.get_spawn_points())
        waypoint = m.get_waypoint(start_pose.location)

        blueprint_library = world.get_blueprint_library()

        vehicle = world.spawn_actor(
            random.choice(blueprint_library.filter('vehicle.*')), start_pose)
        actor_list.append(vehicle)
        vehicle.set_simulate_physics(False)

        camera_rgb = world.spawn_actor(
            blueprint_library.find('sensor.camera.rgb'),
            carla.Transform(carla.Location(x=-5.5, z=2.8),
                            carla.Rotation(pitch=-15)),
            attach_to=vehicle)
        actor_list.append(camera_rgb)

        camera_semseg = world.spawn_actor(
            blueprint_library.find('sensor.camera.semantic_segmentation'),
            carla.Transform(carla.Location(x=-5.5, z=2.8),
                            carla.Rotation(pitch=-15)),
            attach_to=vehicle)
        actor_list.append(camera_semseg)

        # Create a synchronous mode context.
        with CarlaSyncMode(world, camera_rgb, camera_semseg) as sync_mode:
            while True:
                if should_quit():
                    return
                clock.tick()

                # Advance the simulation and wait for the data.
                snapshot, image_rgb, image_semseg = sync_mode.tick(timeout=2.0)

                # Choose the next waypoint and update the car location.
                waypoint = random.choice(waypoint.next(1.5))
                vehicle.set_transform(waypoint.transform)

                image_semseg.convert(carla.ColorConverter.CityScapesPalette)
                fps = 1.0 / snapshot.timestamp.delta_seconds

                # Draw the display.
                draw_image(display, image_rgb)
                draw_image(display, image_semseg, blend=True)
                display.blit(
                    font.render('% 5d FPS (real)' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('% 5d FPS (simulated)' % fps, True,
                                (255, 255, 255)), (8, 28))
                pygame.display.flip()

    finally:

        print('destroying actors.')
        for actor in actor_list:
            actor.destroy()

        pygame.quit()
        print('done.')
Example #14
def main():
    global actor_list, flag, frame
    client = carla.Client(host, port)
    client.set_timeout(5.0)

    try:
        world = client.get_world()
    except:
        print("ERROR: Cannot get world !")
        import sys
        sys.exit()

    set_weather(world)
    #carla.TrafficLight.set_green_time(float(999))
    #carla.TrafficLight.set_red_time(0)

    try:
        blueprint_library = world.get_blueprint_library()

        # add vehicle
        vehicle = add_vehicle_component(world, blueprint_library)
        # put the vehicle to drive around.
        #vehicle.set_autopilot(True)

        map = world.get_map()
        spawn_points = map.get_spawn_points()
        destination = spawn_points[random.randint(0,
                                                  len(spawn_points) -
                                                  1)].location

        agent = BasicAgent(vehicle, target_speed=20)
        agent.set_destination((destination.x, destination.y, destination.z))

        dao = GlobalRoutePlannerDAO(map)
        planner = GlobalRoutePlanner(dao)
        planner.setup()

        vehicle.set_simulate_physics(False)

        my_location = vehicle.get_location()
        trace_list = planner.trace_route(my_location, destination)
        waypoints = []

        for (waypoint, road_option) in trace_list:
            waypoints.append(waypoint)
        next_point = waypoints[0].transform

        camera = add_camera_component(world, blueprint_library, vehicle)
        camera.stop()

        while True:
            vehicle.set_transform(next_point)
            my_location = next_point.location
            #my_location = vehicle.get_location()
            me2destination = my_location.distance(destination)
            if me2destination < 50:
                destination = spawn_points[random.randint(
                    0,
                    len(spawn_points) - 1)].location
                #agent.set_destination((destination.x,destination.y,destination.z))
                print("destination change !!!")

            trace_list = planner.trace_route(my_location, destination)
            waypoints = []
            for (waypoint, road_option) in trace_list:
                waypoints.append(waypoint)

            print(len(waypoints))
            if len(waypoints) < 5:
                print('my_location:', my_location.x, my_location.y,
                      my_location.z)
                print('destination:', destination.x, destination.y,
                      destination.z)
                print('me2destination:', me2destination)

            next_point = waypoints[random.randint(0,
                                                  min(len(waypoints) - 1,
                                                      50))].transform

            camera.listen(lambda image: deal_image(image))
            while not flag:
                sleep(0.01)
            camera.stop()
            flag = False

            get_instruct(waypoints)

            for waypoint in waypoints[0:min(len(waypoints) - 1, 30)]:
                box_point = carla.Location(waypoint.transform.location.x,
                                           waypoint.transform.location.y,
                                           waypoint.transform.location.z - 0.4)
                box = carla.BoundingBox(box_point,
                                        carla.Vector3D(x=2, y=0.1, z=0.5))
                rotation = carla.Rotation(
                    pitch=waypoint.transform.rotation.pitch,
                    yaw=waypoint.transform.rotation.yaw,
                    roll=waypoint.transform.rotation.roll)
                world.debug.draw_box(box=box,
                                     rotation=rotation,
                                     thickness=1.2,
                                     life_time=0)

            sleep(0.3)
            camera.listen(lambda image: deal_image(image))
            while not flag:
                sleep(0.01)
            camera.stop()
            flag = False

            sleep(1.0)

    finally:
        print('destroying actors')
        for actor in actor_list:
            actor.destroy()
        actor_list = []
        print('done.')
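
A quick way to sanity-check what planner.trace_route returns is CARLA's debug drawing helper. The sketch below is not part of the original script; it reuses the planner, world, my_location and destination objects from the loop above and marks each traced waypoint in the spectator view for a couple of seconds:

# Sketch: visualize the traced route (illustrative only).
for waypoint, road_option in planner.trace_route(my_location, destination):
    world.debug.draw_string(
        waypoint.transform.location + carla.Location(z=0.5),
        'o',
        draw_shadow=False,
        color=carla.Color(r=0, g=255, b=0),
        life_time=2.0)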
Example #15
0
def main():
    actor_list = []

    try:
        client = carla.Client("localhost", 2000)
        client.set_timeout(10.0)

        # Town05 and Town07 have highways, parking areas, and standard roads
        world = client.get_world()

        # blueprint library
        blueprint_library = world.get_blueprint_library()

        # Agent = Dodge Charger Police
        bp = blueprint_library.find('vehicle.dodge_charger.police')

        # GNSS Sensor
        gnss_bp = world.get_blueprint_library().find('sensor.other.gnss')
        gnss_location = carla.Location(0, 0, 0)
        gnss_rotation = carla.Rotation(0, 0, 0)
        gnss_transform = carla.Transform(gnss_location, gnss_rotation)
        gnss_bp.set_attribute(
            "sensor_tick",
            str(1.0))  # Wait time for sensor to update (1.0 = 1s)

        # Spawn Agent
        transform = carla.Transform(
            carla.Location(
                # x=121.61898803710938, y=187.5887451171875, z=1.0), carla.Rotation(yaw=180) # Location in Town02
                # x=130.81553649902344, y=65.8092269897461, z=1.0), carla.Rotation(yaw=-0)  # Location in Town03
                # x = 117.7, y = 62.5, z = 1.0), carla.Rotation(yaw=-0)  # Location in Town03
                # x = 82.0, y = 8.2, z = 1.0), carla.Rotation(yaw=-0)  # Location in Town03
                x=157.0,
                y=-148.0,
                z=1.0),
            carla.Rotation(
                yaw=-0)  # Location in Town03 - Tunnel Entrance (North)
        )
        vehicle = world.spawn_actor(bp, transform)
        actor_list.append(vehicle)
        print('created %s' % vehicle.type_id)

        # Spawn Sensors
        gnss = world.spawn_actor(gnss_bp, gnss_transform, attach_to=vehicle)
        actor_list.append(gnss)
        print('created %s' % gnss.type_id,
              "\n# # # # # # # # # # # # # # # # # # # # # #")

        # Get Town Map name
        town = str(world.get_map().name)

        # Activate GNSS Sensor and save current location in GNSS_DATA.parquet
        print(
            "Starting GNSS sensor\n# # # # # # # # # # # # # # # # # # # # # #"
        )
        gnss.listen(lambda event: gnss_live_location(event))
        time.sleep(2)
        print(
            "Current location acquired.\n# # # # # # # # # # # # # # # # # # # # # #"
        )

        # Current Location - Loads it from GNSS_DATA.parquet
        gnss_data = pd.read_parquet("GNSS_DATA.parquet")
        current_loc = (gnss_data.loc[0, 'lat'], gnss_data.loc[0, 'lon'],
                       gnss_data.loc[0, 'alt'])

        # Fixed Destination (CARLA Location of type x, y, z)
        # destination = (127.02777862548828, 306.4728088378906, 1.0)  # Fixed Location in Town02
        destination = (-20.639827728271484, -142.1471405029297, 1.0
                       )  # Fixed Location in Town03
        # destination = (118.0, -201.5, 1.0)  # Fixed Location in Town03

        # Random Destination (CARLA Location)
        # If choosing this option instead of the fixed destination make sure to
        # change dest_fixed=False in the code below
        # destination = random.choice(world.world.get_map().get_spawn_points())

        # Get shortest path and visualize it
        df_carla_path = process_nav_a2b(
            world, town, current_loc, destination,
            dest_fixed=True)  # put dest_fixed=False if random location

        # Vehicle autopilot
        vehicle.set_autopilot(
            False
        )  # Not activated if False (Needs to be False for teleportation)

        # Vehicle physics
        vehicle.set_simulate_physics(False)  # For teleportation put False

        # Gather and send waypoints one by one to vehicle
        print("Driving to destination...")
        for index, row in df_carla_path.iterrows():
            target_x = row["x"]
            target_y = row["y"]
            target_z = row["z"]

            # Build the target transform from the current path row.
            target_waypoint = carla.Transform(
                carla.Location(x=target_x, y=target_y,
                               z=target_z + 1))  # +1 in z for teleport

            # Teleport the vehicle.
            vehicle.set_transform(target_waypoint)

            time.sleep(1)

        print("# # # # # # # # # # # # # # # # # # # # # #\n"
              "You have arrived at your destination.\n")

    finally:
        print('\nDestroying actors.')
        client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        if os.path.exists("GNSS_DATA.parquet"):
            print('Destroying GNSS_DATA.parquet.')
            os.remove("GNSS_DATA.parquet")
        print('Done.')
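
The gnss_live_location callback referenced above is not included in this snippet. A minimal sketch of what such a callback could look like, assuming pandas with a parquet engine (e.g. pyarrow) is installed and that only the most recent fix needs to be kept:

import pandas as pd

def gnss_live_location(event):
    # event is a carla.GnssMeasurement; overwrite the file with the latest fix.
    pd.DataFrame([{'lat': event.latitude,
                   'lon': event.longitude,
                   'alt': event.altitude}]).to_parquet("GNSS_DATA.parquet")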
Example #16
0
def get_transform(vehicle_location, angle, d=6.4):
    a = math.radians(angle)
    location = carla.Location(d * math.cos(a), d * math.sin(a),
                              2.0) + vehicle_location
    return carla.Transform(location, carla.Rotation(yaw=180 + angle,
                                                    pitch=-15))
class CameraManager(object):
    def __init__(self, parent_actor, hud):
        self.sensor = None
        self.surface = None
        self._parent = parent_actor
        self.hud = hud
        self.recording = False
        self._camera_transforms = [
            carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),
            carla.Transform(carla.Location(x=1.6, z=1.7))]
        self.transform_index = 1
        self.sensors = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB'],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],
            ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],
            ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)'],
            ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)'],
            ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,
                'Camera Semantic Segmentation (CityScapes Palette)'],
            ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']]
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(hud.dim[0]))
                bp.set_attribute('image_size_y', str(hud.dim[1]))
            elif item[0].startswith('sensor.lidar'):
                bp.set_attribute('range', '5000')
                ## rotation freq
                bp.set_attribute('rotation_frequency', '20')
            item.append(bp)
        self.index = None
        
        ########################################################################
        ## create custom sensors for collecting rgb image and Lidar data
        #'''
        self.sensor_rgb = None
        self.sensor_lidar = None
        # listen and record control signals on each image
        self.control_listener = None
        
        print("init custum rgb and Lidar sensors")
        
        if self.sensor_rgb is not None:
            self.sensor_rgb.destroy()
            #self.surface = None
        self.sensor_rgb = self._parent.get_world().spawn_actor(
            self.sensors[0][-1],    # rgb image
            self._camera_transforms[self.transform_index],
            attach_to=self._parent)
        # We need to pass the lambda a weak reference to self to avoid
        # circular reference.
        weak_self = weakref.ref(self)
        self.sensor_rgb.listen(lambda image: CameraManager._parse_image_rgblidar(weak_self, image, 0))
        #'''
        
        ## Lidar
        #'''
        #print(self.sensors[6])
        #['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', <carla.libcarla.ActorBlueprint object at 0x7f1b038fd570>]
        #assert(0)
        if self.sensor_lidar is not None:
            self.sensor_lidar.destroy()
            #self.surface = None
        self.sensor_lidar = self._parent.get_world().spawn_actor(
            self.sensors[6][-1],    # Lidar image
            self._camera_transforms[self.transform_index],
            attach_to=self._parent)
        # We need to pass the lambda a weak reference to self to avoid
        # circular reference.
        #weak_self = weakref.ref(self)
        self.sensor_lidar.listen(lambda image: CameraManager._parse_image_rgblidar(weak_self, image, 6))
        
        #'''
        
        ## Control signals
        '''
        if self.control_listener is not None:
            self.control_listener.destroy()
        
        # sync with image input, so use sensors[0]
        self.control_listener = self._parent.get_world().spawn_actor(
            self.sensors[0][-1],    # rgb image
            self._camera_transforms[self.transform_index],
            attach_to=self._parent)

        self.control_listener.listen(lambda image: CameraManager._parse_image_control(weak_self, image, world))
        '''
        
        ## image saver
        
        self.out_dir = "_out_data"
        self.episode_count = 0
        
        #print('%s/ep_%d/' % (self.out_dir, self.episode_count))
        #assert(0)
        
        self.saver = [
            # BufferedImageSaver(f'{self.out_dir}5/ep_{self.episode_count:03d}/CameraDepth/',
            #                    1000, WINDOW_HEIGHT, WINDOW_WIDTH, 1),
            BufferedImageSaver('%s/ep_%d/' % (self.out_dir, self.episode_count),
                               100, WINDOW_HEIGHT, WINDOW_WIDTH, 3, 'CameraRGB'),
            BufferedImageSaver('%s/ep_%d/' % (self.out_dir, self.episode_count),
                               100, 8000, 1, 3, 'Lidar')
                               ## TODO: current limit 5000 points, might be more
            ]
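
Both listeners above pass a weak reference into the lambda so that the sensor callback does not keep its owner alive. A stripped-down sketch of that pattern on its own (class and method names here are illustrative, not from the original code):

import weakref

class SensorOwner(object):
    def __init__(self, world, blueprint, transform, parent):
        self.sensor = world.spawn_actor(blueprint, transform, attach_to=parent)
        # Hold the owner weakly inside the callback to avoid a reference cycle.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda data: SensorOwner._on_data(weak_self, data))

    @staticmethod
    def _on_data(weak_self, data):
        self = weak_self()
        if self is None:  # owner was already destroyed
            return
        # ... process the sensor data here ...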
Example #18
0
try:
    import pygame
except ImportError:
    raise RuntimeError('cannot import pygame, make sure pygame package is installed')

try:
    import numpy as np
except ImportError:
    raise RuntimeError('cannot import numpy, make sure numpy package is installed')


AUTOPILOT_DESTINATION = carla.Location(x=126.097374, y=125.237732, z=0.342617)

# Enable this to place the camera at the driver's view.
DRIVERVIEW_CAMERA = False

MAIN_CAMERA_TRANSFORM = carla.Transform(carla.Location(x=1, z=1), carla.Rotation(pitch=-50, yaw=180))

if DRIVERVIEW_CAMERA == True:
    MAIN_CAMERA_TRANSFORM = carla.Transform(carla.Location(x=5.3, y=0.35, z=0.9), carla.Rotation(yaw=180, pitch=-5))

MIRROR_W = 350
MIRROR_H = 200

DISPLAY_W = 1200
DISPLAY_H = 1000

SCREEN_W = MIRROR_W*2+DISPLAY_W
SCREEN_H = max(DISPLAY_H, MIRROR_H)

FRONT_W = 300
FRONT_H = 60
Example #19
0
def main():

    actor_list = []
    verboseIsEnabled = None
    try:
        """
        Section for starting the client and connecting to the server
        """
        client = carla.Client('localhost', 2000)
        client.set_timeout(2.0)

        for arg in sys.argv:
            if (arg == '--verbose'):
                verboseIsEnabled = True

        if verboseIsEnabled:
            print('client version: %s' % client.get_client_version())
            print('server version: %s' % client.get_server_version())
            print('client to server connection established (server version: {})'.format(client.get_server_version()))

            print('Retrieving the world data from server...')

        world = client.get_world()
        if verboseIsEnabled:
            print('{} \n'.format(world))

        """
        Section for retrieving the blueprints and spawn the actors
        """
        blueprint_library = world.get_blueprint_library()
        if verboseIsEnabled:
            print('\nRetrieving CARLA blueprint library...')
            print('\nobject: %s\n\nblueprint methods: %s\n\nblueprint list:' % (type(blueprint_library), dir(blueprint_library)) )
            for blueprint in blueprint_library:
                print(blueprint)

        audi_blueprint = blueprint_library.find('vehicle.audi.tt')
        print('\n%s\n' % audi_blueprint)

        color = '191,191,191'
        audi_blueprint.set_attribute('color', color)

        # Spawn location measured in the 'carissma_scenario' map (point 32)
        transform = carla.Transform(
            carla.Location(x=32.4, y=76.199997, z=39.22),
            carla.Rotation(yaw=0.0))

        vehicleEgo = world.spawn_actor(audi_blueprint, transform)
        actor_list.append(vehicleEgo)
        print('created %s' % vehicleEgo.type_id)

        transform = carla.Transform(
            carla.Location(
                x=88.2, y=93.5,
                z=38.98), carla.Rotation(yaw=-90.0)
        )

        color = random.choice(audi_blueprint.get_attribute('color').recommended_values)
        audi_blueprint.set_attribute('color', color)

        vehicleObservable = world.spawn_actor(audi_blueprint, transform)
        actor_list.append(vehicleObservable)
        print('created %s' % vehicleObservable.type_id)

        '''
        time.sleep(1.5)
        vehicleEgo.apply_control(carla.VehicleControl(throttle=.3))
	elapsed_time = 0
	time_increment = 0.1
        '''

        # Find the blueprint of the sensor.
        blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
        # Modify the attributes of the blueprint to set image resolution and field of view.
        blueprint.set_attribute('image_size_x', '1920')
        blueprint.set_attribute('image_size_y', '1080')
        blueprint.set_attribute('fov', '110')
        # Provide the position of the sensor relative to the vehicle.
        # transform = carla.Transform(carla.Location(x=0.8, z=1.7))
        transformFrontCam = carla.Transform(carla.Location(x=0.8, z=1.5), carla.Rotation(yaw=0))
        transformRearCam = carla.Transform(carla.Location(x=-1.8, z=1), carla.Rotation(yaw=180))

        # Tell the world to spawn the sensor, don't forget to attach it to your vehicle actor.
        frontCam = world.spawn_actor(blueprint, transformFrontCam, attach_to=vehicleEgo)
        rearCam = world.spawn_actor(blueprint, transformRearCam , attach_to=vehicleEgo)

        # Subscribe to the sensor stream by providing a callback function, this function is
        # called each time a new image is generated by the sensor.
        frontCam.listen(lambda image: image.save_to_disk('output/front/%06d.png' % image.frame_number))
        rearCam.listen(lambda image: image.save_to_disk('output/rear/%06d.png' % image.frame_number))
        actor_list.append(frontCam)
        actor_list.append(rearCam)

        time.sleep(5)
        vehicleEgo.apply_control(carla.VehicleControl(throttle=.7))
        time.sleep(3.8)
        vehicleObservable.apply_control(carla.VehicleControl(throttle=1))
        time.sleep(2.6)
        vehicleEgo.apply_control(carla.VehicleControl(hand_brake=True))
        time.sleep(3.5)

    finally:
        print('destroying actors')
        for actor in actor_list:
            actor.destroy()
        print('done.')
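
The two camera callbacks above write every frame straight to disk with save_to_disk. If frames are needed in memory instead, the usual conversion from carla.Image to a numpy array looks roughly like this (a sketch assuming numpy; BGRA is CARLA's raw camera byte layout):

import numpy as np

def to_rgb_array(image):
    # image.raw_data is a flat BGRA byte buffer of size height * width * 4.
    bgra = np.frombuffer(image.raw_data, dtype=np.uint8)
    bgra = bgra.reshape((image.height, image.width, 4))
    return bgra[:, :, :3][:, :, ::-1]  # drop alpha, flip BGR -> RGB

# e.g. frontCam.listen(lambda image: handle(to_rgb_array(image)))
# where 'handle' is a placeholder for your own processing function.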
Example #20
0
def main(args):
    actor_list = []
    sensor_list = []
    pygame.init()

    display = pygame.display.set_mode(
        (SCREEN_W, SCREEN_H),
        pygame.HWSURFACE | pygame.DOUBLEBUF)
    fill_rect = pygame.Rect((0,0,MIRROR_W, SCREEN_H/2))
    pygame.display.set_caption('CARLA Scenarios')
    font = get_font()
    font_big = get_font(font_size=30)
    clock = pygame.time.Clock()

    client = carla.Client('localhost', 2000)
    client.set_timeout(5.0)

    world = client.get_world()
    world = client.reload_world()

    config = read_json_config(args.spawn_config)

    # This agent will drive the autopilot to certain destination
    behaviour_agent = None
    is_agent_controlled = False
    prev_agent_autopilot_enabled = False

    scenario_instance = None
    scenario_class = None

    try:
        m = world.get_map()

        sx, sy, sz = float(config['ego_actor']['x']), float(config['ego_actor']['y']), float(config['ego_actor']['z'])
        syaw, spitch, sroll = float(config['ego_actor']['yaw']), float(config['ego_actor']['pitch']), float(config['ego_actor']['roll'])
        s_loc = carla.Location(sx, sy, sz+1)
        s_rot = carla.Rotation(yaw=syaw, pitch=spitch, roll=sroll)
        start_pose = carla.Transform(location=s_loc, rotation=s_rot)

        #spawn car
        blueprint_library = world.get_blueprint_library()
        car_bp = blueprint_library.find(config['ego_actor']['actor_type'])

        #car_bp.set_attribute('color','255,20,147')
        #car_bp.set_attribute('color','158,255,0')
        car_bp.set_attribute('role_name', 'hero')
        vehicle = world.spawn_actor(
            car_bp,
            start_pose)
        #print(vehicle.attributes.get('role_name'))
        actor_list.append(vehicle)

        #spawn spectator cam
        cam_bp = blueprint_library.find('sensor.camera.rgb')
        cam_bp.set_attribute('image_size_x', str(SCREEN_W))
        cam_bp.set_attribute('image_size_y', str(SCREEN_H))
        #cam_bp.set_attribute('sensor_tick',str(FREQ))
        camera_rgb = world.spawn_actor(
            cam_bp,
            MAIN_CAMERA_TRANSFORM,
            attach_to=vehicle,
            attachment_type=carla.AttachmentType.SpringArm)

        #every sensor must be registered by
        #appending to actor list AND sensor list
        actor_list.append(camera_rgb)
        sensor_list.append(camera_rgb)

        #rearview cam
        #camera_front = spawn.spawn_rgb(vehicle, world, img_x=FRONT_W, img_y=FRONT_H, hz=FREQ, pitch=-12)
        cam_bp.set_attribute('image_size_x', str(FRONT_W))
        cam_bp.set_attribute('image_size_y', str(FRONT_H))
        cam_bp.set_attribute('fov', str(60))
        mirror_rear = world.spawn_actor(
            cam_bp,
            carla.Transform(carla.Location(x=-2, z=1.3), carla.Rotation(yaw=-180)),
            attach_to=vehicle)
        actor_list.append(mirror_rear)
        sensor_list.append(mirror_rear)

        #spawn left mirror
        cam_bp.set_attribute('image_size_x', str(MIRROR_W))
        cam_bp.set_attribute('image_size_y', str(MIRROR_H))
        cam_bp.set_attribute('fov', str(70))

        mirror_left = world.spawn_actor(
            cam_bp,
            carla.Transform(carla.Location(y=-1.1, z=1.0), carla.Rotation(yaw=-160)),
            attach_to=vehicle)

        #every sensor must be registered by
        #appending to actor list AND sensor list
        actor_list.append(mirror_left)
        sensor_list.append(mirror_left)

        #spawn right mirror
        cam_bp.set_attribute('image_size_x', str(MIRROR_W))
        cam_bp.set_attribute('image_size_y', str(MIRROR_H))
        cam_bp.set_attribute('fov', str(70))
        mirror_right = world.spawn_actor(
            cam_bp,
            carla.Transform(carla.Location(y=1.1, z=1.0), carla.Rotation(yaw=160)),
            attach_to=vehicle)

        #every sensor must be registered by
        #appending to actor list AND sensor list
        actor_list.append(mirror_right)
        sensor_list.append(mirror_right)

        world.player = vehicle
        controller = DualControl(vehicle, world=world, start_in_autopilot=False, agent_controlled=True)

        if args.scenario == True:
            scenario_config_json = read_json_config(args.scenario_config)
            scenario_class = scenario_config_json["class"]
            if scenario_class == "BikeCrossing":
                scenario_instance = BikeCrossing()
            elif scenario_class == "CarCrashScenario":
                scenario_instance = CarCrashScenario()
            elif scenario_class == "PedestrianCrossing":
                scenario_instance = PedestrianCrossing()

            scenario_instance.load_config(args.scenario_config)
            scenario_instance.spawn_npcs()
            trigger_distances = scenario_instance.get_distances()
            beep = pygame.mixer.Sound('assets/sounds/bell.wav')
            wheel_icon = pygame.image.load('assets/icons/steering wheel.png').convert_alpha()
            sound_time = 0
            flash_time = 0

        #initial scenario stage
        flash_on = False
        stage = 0

        #initial hlc
        hlc = 2

        if DRIVERVIEW_CAMERA == True:
            camera_rgb.set_transform(MAIN_CAMERA_TRANSFORM)

        # Create a synchronous mode context.
        #SENSORS SHOULD BE PASSED IN THE SAME ORDER AS IN ACTOR_LIST
        with CarlaSyncMode(world, vehicle, m, *sensor_list, fps=FREQ, record=args.record, scenario=args.scenario) as sync_mode:

            record_start_flag = False
            yaw_prev = None
            while True:

                if should_quit():
                    return

                #print(sync_mode._queues)
                clock.tick()

                # Advance the simulation and wait for the data.
                tick_data = sync_mode.tick(timeout=2.0, hlc=hlc)
                snapshot = tick_data[0]
                image_rgb = tick_data[1]

                image_front = tick_data[2]
                image_mirror_left = tick_data[3]
                image_mirror_right = tick_data[4]


                #parse for control input and run vehicle
                hlc = controller.parse_events(vehicle, clock)
                control_states = vehicle.get_control()

                #draw trail based on hlc
                #sync_mode.draw_trail(hlc)

                v = vehicle.get_velocity()
                trans = vehicle.get_transform()
                #print('{:.3f}, {:.3f}'.format(trans.location.x, trans.location.y))

                
                closest_wp = m.get_waypoint(trans.location)
                draw_loc = closest_wp.transform.location+carla.Location(z=2)
                #sync_mode.world.debug.draw_point(draw_loc, life_time=0.05)
                #print(closest_wp.is_junction, len(closest_wp.next(0.5)))

                #track SECOND waypoint in queue as first does not shift with lc command
                second_wp = sync_mode.waypoint_queue[1]
                heading_error = second_wp.transform.rotation.yaw - trans.rotation.yaw
                heading_error = util.angle_wrap(heading_error)
                delta_x, delta_y = util.car_frame_deltas(trans, second_wp.transform.location)

                vx, vy = util.measure_forward_velocity(v, trans.rotation, return_both=True)
                vx_kph = vx*3.6
                curvature = util.curvature(sync_mode.waypoint_queue[1].transform, sync_mode.waypoint_queue[2].transform)

                #lateral accel
                #r = 1/(curvature + 1e-6)
                #print(vx**2/r)

                if len(sync_mode.vehicles_close)>0:
                    #print(sync_mode.vehicles_close[0][0], sync_mode.vehicles_close[0][1].__str__())
                    dist_to_car = max(sync_mode.vehicles_close[0][0] - 4.5, 0)
                else:
                    dist_to_car = 50.0

                if len(sync_mode.pedestrians_close)>0:
                    #print(sync_mode.vehicles_close[0][0], sync_mode.vehicles_close[0][1].__str__())
                    dist_to_walker = max(sync_mode.pedestrians_close[0][0] - 4.5, 0)
                else:
                    dist_to_walker = 50.0

                affordance = (heading_error, delta_y, curvature, dist_to_car, dist_to_walker)



                '''
                behaviour agent begin
                '''
                if controller._agent_autopilot_enabled == True:
                    if prev_agent_autopilot_enabled == False:
                        # Init the agent
                        behaviour_agent = BehaviorAgent(vehicle, behavior="normal")
                        # Set agent's destination  
                        behaviour_agent.set_destination(behaviour_agent.vehicle.get_location(), AUTOPILOT_DESTINATION)
                        print ("Autopilot is controlled by BehaviourAgent to destination: {}".format(AUTOPILOT_DESTINATION))

                    behaviour_agent.update_information()

                    # print("Autopilot driving {}  more waypoints till destination is reached".format(len(behaviour_agent.get_local_planner().waypoints_queue)))
                    
                    if len(behaviour_agent.get_local_planner().waypoints_queue) <= 0: # For destination precision change this value
                        print("Target almost reached, mission accomplished...")
                        controller._agent_autopilot_enabled = False
                        behaviour_agent.set_destination(behaviour_agent.vehicle.get_location(), behaviour_agent.vehicle.get_location())
                    else:
                        input_control = behaviour_agent.run_step()
                        world.player.apply_control(input_control)

                prev_agent_autopilot_enabled = controller._agent_autopilot_enabled
                '''
                behaviour agent end
                '''


                '''
                scenario logic
                '''
                if sync_mode.scenario and (scenario_class == "BikeCrossing"):
                    
                    # print ("sync_mode_scenario stage {}".format(stage))

                    #check distance to bike
                    bike_dist = scenario_instance.check_distance(trans.location)
                    #if distance is below threshold, do something
                    #first stage: flash warning
                    #2nd stage: bike starts crossing

                    if stage==0:
                        if bike_dist < trigger_distances[0]:
                            #stage 0 to 1 or 0 to 3 transition
                            if controller._agent_autopilot_enabled:
                                stage = 1
                            else:
                                stage = 3
                    elif stage==1:
                        #stage 1: play warning sound
                        if snapshot.timestamp.elapsed_seconds - sound_time > 3:
                            beep.play()
                            sound_time = snapshot.timestamp.elapsed_seconds - 0.75
                        if snapshot.timestamp.elapsed_seconds - flash_time > 1.5:
                            flash_on = not flash_on
                            flash_time = snapshot.timestamp.elapsed_seconds - 0.75

                        if bike_dist < trigger_distances[1]:
                            flash_on = False
                            #stage one to two transition
                            scenario_instance.begin()
                            #disable autopilot
                            controller._agent_autopilot_enabled = False
                            print('autopilot toggled: {}'.format(controller._agent_autopilot_enabled))
                            sync_mode.car.set_autopilot(controller._agent_autopilot_enabled)
                            stage = 2
                        elif controller._agent_autopilot_enabled == False:
                            #stage 1 to 3 transition
                            flash_on = False
                            stage = 3
                    elif stage==2:
                        #delta, throttle, brake = driver.drive(heading_error, delta_y, vx, curvature, 28, min(dist_to_car, dist_to_walker))
                        vc = carla.VehicleControl(throttle=0, steer=0, brake=0.01)
                        sync_mode.car.apply_control(vc)

                        v = vehicle.get_velocity()
                        vx, vy = util.measure_forward_velocity(v, trans.rotation, return_both=True)
                        if vx <= 0.1 :
                            stage = 3
                        
                    elif stage == 3:
                        #stage 3, bike crossing still gets triggered
                        #but driver stays in control
                        if bike_dist > trigger_distances[1]:
                            scenario_instance.begin()
                            stage = 4
                        elif controller._agent_autopilot_enabled:
                            stage = 1

                    elif stage==4:
                        #nothing happens in stage 4
                        # print ("Scenario done, close it")
                        # scenario_instance.kill_npcs()
                        sync_mode.scenario = False
                        pass

                if sync_mode.scenario and (scenario_class == "CarCrashScenario"):
                    
                    # print ("sync_mode_scenario stage {}".format(stage))

                    #check distance to cars
                    cars_dist = scenario_instance.check_distance(trans.location)
                    #if distance is below threshold, do something
                    #first stage: flash warning
                    #2nd stage: none

                    if stage==0:
                        if cars_dist < trigger_distances[0]:
                            scenario_instance.begin()
                            #stage 0 to 1 or 0 to 3 transition
                            if controller._agent_autopilot_enabled:
                                stage = 1
                            else:
                                stage = 3
                    elif stage==1:
                        #stage 1: play warning sound
                        if snapshot.timestamp.elapsed_seconds - sound_time > 3:
                            beep.play()
                            sound_time = snapshot.timestamp.elapsed_seconds - 0.75
                        if snapshot.timestamp.elapsed_seconds - flash_time > 1.5:
                            flash_on = not flash_on
                            flash_time = snapshot.timestamp.elapsed_seconds - 0.75

                        if cars_dist < trigger_distances[1]:
                            flash_on = False
                            #stage one to two transition
                            scenario_instance.begin()
                            #disable autopilot
                            controller._agent_autopilot_enabled = False
                            print('autopilot toggled: {}'.format(controller._agent_autopilot_enabled))
                            sync_mode.car.set_autopilot(controller._agent_autopilot_enabled)
                            stage = 2
                        elif controller._agent_autopilot_enabled == False:
                            #stage 1 to 3 transition
                            flash_on = False
                            stage = 3
                    elif stage==2:
                        #delta, throttle, brake = driver.drive(heading_error, delta_y, vx, curvature, 28, min(dist_to_car, dist_to_walker))
                        vc = carla.VehicleControl(throttle=0, steer=0, brake=0.01)
                        sync_mode.car.apply_control(vc)

                        v = vehicle.get_velocity()
                        vx, vy = util.measure_forward_velocity(v, trans.rotation, return_both=True)
                        if vx <= 0.1 :
                            stage = 3
                        
                    elif stage == 3:
                        #stage 3, car crash still gets triggered
                        #but driver stays in control
                        if cars_dist > trigger_distances[1]:
                            scenario_instance.begin()
                            stage = 4
                        elif controller._agent_autopilot_enabled:
                            stage = 1

                    elif stage==4:
                        #nothing happens in stage 4
                        # print ("Scenario done, close it")
                        # scenario_instance.kill_npcs()
                        sync_mode.scenario = False
                        pass
		
                if sync_mode.scenario and (scenario_class == "PedestrianCrossing"):
                    
                    # print ("sync_mode_scenario stage {}".format(stage))

                    #check distance to pedestrian
                    pedestrian_dist = scenario_instance.check_distance(trans.location)
                    #if distance is below threshold, do something
                    #first stage: flash warning
                    #2nd stage: pedestrian starts crossing

                    if stage==0:
                        if pedestrian_dist < trigger_distances[0]:
                            scenario_instance.begin()
                            #stage 0 to 1 or 0 to 3 transition
                            if controller._agent_autopilot_enabled:
                                stage = 1
                            else:
                                stage = 3
                    elif stage==1:
                        #stage 1: play warning sound
                        if snapshot.timestamp.elapsed_seconds - sound_time > 3:
                            beep.play()
                            sound_time = snapshot.timestamp.elapsed_seconds - 0.75
                        if snapshot.timestamp.elapsed_seconds - flash_time > 1.5:
                            flash_on = not flash_on
                            flash_time = snapshot.timestamp.elapsed_seconds - 0.75

                        if pedestrian_dist < trigger_distances[1]:
                            flash_on = False
                            #stage one to two transition
                            # scenario_instance.begin()
                            #disable autopilot
                            controller._agent_autopilot_enabled = False
                            print('autopilot toggled: {}'.format(controller._agent_autopilot_enabled))
                            sync_mode.car.set_autopilot(controller._agent_autopilot_enabled)
                            stage = 2
                        elif controller._agent_autopilot_enabled == False:
                            #stage 1 to 3 transition
                            flash_on = False
                            stage = 3
                    elif stage==2:
                        #delta, throttle, brake = driver.drive(heading_error, delta_y, vx, curvature, 28, min(dist_to_car, dist_to_walker))
                        vc = carla.VehicleControl(throttle=0, steer=0, brake=0.01)
                        sync_mode.car.apply_control(vc)

                        v = vehicle.get_velocity()
                        vx, vy = util.measure_forward_velocity(v, trans.rotation, return_both=True)
                        if vx <= 0.1 :
                            stage = 3
                        
                    elif stage == 3:
                        #stage 3, pedestrian crossing still gets triggered
                        #but driver stays in control
                        if pedestrian_dist<trigger_distances[1]:
                            scenario_instance.begin()
                            stage = 4
                        elif controller._agent_autopilot_enabled:
                            stage = 1

                    elif stage==4:
                        #nothing happens in stage 4
                        # print ("Scenario done, close it")
                        # scenario_instance.kill_npcs()
                        sync_mode.scenario = False
                        pass

                '''end of scenario code'''

                #record
                if vx > 0.01:
                    record_start_flag = True
                if sync_mode.record and record_start_flag:
                    #sync_mode.record_image(tick_data[1:])
                    sync_mode.record_frame(snapshot, trans, v, control_states, affordance, second_wp, hlc, stage)

                #image_semseg.convert(carla.ColorConverter.CityScapesPalette)
                fps = round(1.0 / snapshot.timestamp.delta_seconds)

                # Draw the display.
                display.fill((0,0,0), rect=fill_rect)
                # draw_image(display, image_rgb)
                draw_cam(display, image_rgb, 0, 0, False)
                draw_cam(display, image_front, (SCREEN_W/2)-FRONT_W/2, 0)
                draw_cam(display, image_mirror_left, 0, SCREEN_H - MIRROR_H)
                draw_cam(display, image_mirror_right, MIRROR_W+DISPLAY_W, SCREEN_H - MIRROR_H)

                if flash_on:
                    display.blit(
                        font_big.render('Please takeover control of the vehicle', True, (255,0,0)),
                        (50 + SCREEN_W//2-265, SCREEN_H//2))
                    display.blit(wheel_icon, (SCREEN_W * 0.46,SCREEN_H//3 + 64))

                display.blit(
                    font.render('steer: {0:2.2f}, brake: {1:2.2f}, gas: {2:2.2f}'.format(control_states.steer,control_states.brake,control_states.throttle), False, (255, 255, 255)),
                    (8, 20))
                display.blit(
                    font.render('% 5d Gear' % control_states.gear, False, (255, 255, 255)),
                    (8, 40))


                display.blit(
                    font_big.render('Speed: % 5d km/h' % vx_kph, False, (50, 87, 168)),
                    (SCREEN_W * 0.45, SCREEN_H - 205))

                autopilot_str_val = 'OFF'
                if controller._agent_autopilot_enabled == True: 
                    autopilot_str_val = 'ON'
                display.blit(
                    font_big.render('  Autopilot: ' + autopilot_str_val, True, (139, 61, 136)),
                    (SCREEN_W * 0.45, SCREEN_H - 240))

                pygame.display.flip()

    except Exception as exception:
        print (str(exception))

    finally:
        print('destroying actors.')
        if args.scenario:
            if scenario_instance is not None:
                scenario_instance.kill_npcs()
        for actor in actor_list:
            actor.destroy()

    pygame.quit()
    print('done.')
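
The three scenario branches above repeat the same stage-1 warning logic (replay the bell roughly every 3 s, toggle the takeover flash roughly every 1.5 s). A sketch of how that shared piece could be factored out, reusing the variable names from the loop above (an illustrative refactor, not the original code):

def update_warning(elapsed, beep, sound_time, flash_time, flash_on):
    # elapsed is snapshot.timestamp.elapsed_seconds from the current tick.
    if elapsed - sound_time > 3:
        beep.play()
        sound_time = elapsed - 0.75
    if elapsed - flash_time > 1.5:
        flash_on = not flash_on
        flash_time = elapsed - 0.75
    return sound_time, flash_time, flash_on

# inside each stage == 1 branch:
# sound_time, flash_time, flash_on = update_warning(
#     snapshot.timestamp.elapsed_seconds, beep, sound_time, flash_time, flash_on)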
Example #21
0
def create_environment(world, sensors, n_vehicles, n_walkers, spawn_points,
                       client):
    global sensors_callback
    sensors_ret = []
    blueprint_library = world.get_blueprint_library()

    # setup sensors
    for sensor_spec in sensors:
        bp = blueprint_library.find(sensor_spec['type'])

        if sensor_spec['type'].startswith('sensor.camera'):
            bp.set_attribute('image_size_x', str(sensor_spec['width']))
            bp.set_attribute('image_size_y', str(sensor_spec['height']))
            bp.set_attribute('fov', str(sensor_spec['fov']))
            sensor_location = carla.Location(x=sensor_spec['x'],
                                             y=sensor_spec['y'],
                                             z=sensor_spec['z'])
            sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                             roll=sensor_spec['roll'],
                                             yaw=sensor_spec['yaw'])

        elif sensor_spec['type'].startswith('sensor.lidar'):
            bp.set_attribute('range', '200')
            bp.set_attribute('rotation_frequency', '10')
            bp.set_attribute('channels', '32')
            bp.set_attribute('upper_fov', '15')
            bp.set_attribute('lower_fov', '-30')
            bp.set_attribute('points_per_second', '500000')
            sensor_location = carla.Location(x=sensor_spec['x'],
                                             y=sensor_spec['y'],
                                             z=sensor_spec['z'])
            sensor_rotation = carla.Rotation(pitch=sensor_spec['pitch'],
                                             roll=sensor_spec['roll'],
                                             yaw=sensor_spec['yaw'])

        elif sensor_spec['type'].startswith('sensor.other.gnss'):
            sensor_location = carla.Location(x=sensor_spec['x'],
                                             y=sensor_spec['y'],
                                             z=sensor_spec['z'])
            sensor_rotation = carla.Rotation()

        # create sensor
        sensor_transform = carla.Transform(sensor_location, sensor_rotation)
        sensor = world.spawn_actor(bp, sensor_transform)

        # add callbacks
        sc = CallBack()
        sensor.listen(sc)

        sensors_callback.append(sc)
        sensors_ret.append(sensor)

    vehicles_list = []
    walkers_list = []
    all_id = []

    blueprint = world.get_blueprint_library().filter('vehicle.audi.a2')[0]
    walker_bp = world.get_blueprint_library().filter(
        "walker.pedestrian.0001")[0]

    # @todo cannot import these directly.
    SpawnActor = carla.command.SpawnActor
    SetAutopilot = carla.command.SetAutopilot
    FutureActor = carla.command.FutureActor

    # --------------
    # Spawn vehicles
    # --------------
    batch = []
    for num, transform in enumerate(spawn_points):
        if num >= n_vehicles:
            break
        blueprint.set_attribute('role_name', 'autopilot')
        batch.append(
            SpawnActor(blueprint,
                       transform).then(SetAutopilot(FutureActor, True)))

    for response in client.apply_batch_sync(batch, False):
        if response.error:
            logging.error(response.error)
        else:
            vehicles_list.append(response.actor_id)

    # -------------
    # Spawn Walkers
    # -------------
    # some settings
    percentagePedestriansRunning = 0.0  # how many pedestrians will run
    percentagePedestriansCrossing = 0.0  # how many pedestrians will walk through the road
    # 1. take all the random locations to spawn
    spawn_points = []
    for i in range(n_walkers):
        spawn_point = carla.Transform()
        loc = world.get_random_location_from_navigation()
        if (loc != None):
            spawn_point.location = loc
            spawn_points.append(spawn_point)
    # 2. we spawn the walker object
    batch = []
    walker_speed = []
    for spawn_point in spawn_points:
        # set as not invincible
        if walker_bp.has_attribute('is_invincible'):
            walker_bp.set_attribute('is_invincible', 'false')
        # set the max speed
        if walker_bp.has_attribute('speed'):
            # walking
            walker_speed.append(
                walker_bp.get_attribute('speed').recommended_values[1])
        else:
            print("Walker has no speed")
            walker_speed.append(0.0)
        batch.append(SpawnActor(walker_bp, spawn_point))
    results = client.apply_batch_sync(batch, True)
    walker_speed2 = []
    for i in range(len(results)):
        if results[i].error:
            logging.error(results[i].error)
        else:
            walkers_list.append({"id": results[i].actor_id})
            walker_speed2.append(walker_speed[i])
    walker_speed = walker_speed2
    # 3. we spawn the walker controller
    batch = []
    walker_controller_bp = world.get_blueprint_library().find(
        'controller.ai.walker')
    for i in range(len(walkers_list)):
        batch.append(
            SpawnActor(walker_controller_bp, carla.Transform(),
                       walkers_list[i]["id"]))
    results = client.apply_batch_sync(batch, True)
    for i in range(len(results)):
        if results[i].error:
            logging.error(results[i].error)
        else:
            walkers_list[i]["con"] = results[i].actor_id
    # 4. put the walker and controller ids together so the actor objects can be retrieved from them
    for i in range(len(walkers_list)):
        all_id.append(walkers_list[i]["con"])
        all_id.append(walkers_list[i]["id"])
    all_actors = world.get_actors(all_id)

    # wait for a tick to ensure client receives the last transform of the walkers we have just created
    world.wait_for_tick()

    # 5. initialize each controller and set a target to walk to (list layout is [controller, walker, controller, walker, ...])
    # set how many pedestrians can cross the road
    world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
    for i in range(0, len(all_id), 2):
        # start walker
        all_actors[i].start()
        # set walk to random point
        all_actors[i].go_to_location(
            world.get_random_location_from_navigation())
        # max speed
        all_actors[i].set_max_speed(float(walker_speed[int(i / 2)]))

    print('Spawned %d vehicles and %d walkers.' %
          (len(vehicles_list), len(walkers_list)))

    return vehicles_list, walkers_list, all_id, all_actors, sensors_ret
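
create_environment returns the spawned ids and sensor actors, but no teardown is shown in this snippet. A sketch of the usual cleanup counterpart, assuming the same return values (the walker list layout is [controller, walker, controller, walker, ...], as built above):

def destroy_environment(client, vehicles_list, all_id, all_actors, sensors):
    # Stop the walker AI controllers before destroying them.
    for i in range(0, len(all_id), 2):
        all_actors[i].stop()
    client.apply_batch([carla.command.DestroyActor(x) for x in all_id])
    client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_list])
    for sensor in sensors:
        sensor.stop()
        sensor.destroy()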
        world = client.load_world('Town06')  # highway town simple
    else:  # otherwise, continue using Town06
        world = client.get_world()
    worldmap = world.get_map()
    blueprint_library = world.get_blueprint_library()
    bp = blueprint_library.filter('model3')[0]  # blueprint for Tesla Model 3
    bp.set_attribute('color', '0,0,255')  # blue
    bp2 = blueprint_library.filter('prius')[0]  # blueprint for red Toyota Prius
    bp3 = blueprint_library.filter('mustang')[0]  # blueprint for light blue Ford Mustang
    print(bp)
    global vehicle
    vehicle = None
    spawn_point = carla.Transform(carla.Location(151.071, 147.458, 2.5),
                                  carla.Rotation(0, 0.234757, 0))
    spawn_point_2 = carla.Transform(carla.Location(124.071, 150.58, 2.5),
                                    carla.Rotation(0, 0.234757, 0))
    spawn_point_3 = carla.Transform(carla.Location(154.071, 150.458, 2.5),
                                    carla.Rotation(0, 0.234757, 0))
    # spawn_point = random.choice(world.get_map().get_spawn_points())
    print(str(spawn_point.location) + str(spawn_point.rotation))
    vehicle = world.try_spawn_actor(bp, spawn_point)  # spawn the car (actor)
    vehicle_2 = world.try_spawn_actor(bp2, spawn_point_2)  # spawn the car (actor)
    vehicle_3 = world.try_spawn_actor(bp3, spawn_point_3)  # spawn the car (actor)
    # get the blueprint for this sensor
    blueprint = blueprint_library.find('sensor.camera.rgb')
    # blueprint.set_attribute('post_processing', 'SemanticSegmentation')
    # change the dimensions of the image
def get_transform(angle, d=6.5):
    a = math.radians(angle)
    location = carla.Location(d * math.cos(a), d * math.sin(a), 2.0) + LOCATION
    return carla.Transform(location, carla.Rotation(yaw=180 + angle,
                                                    pitch=-15))
Example #24
0
    def convert_position_to_transform(position):
        """
        Convert an OpenScenario position into a CARLA transform

        Not supported: Road, RelativeRoad, Lane, RelativeLane as the PythonAPI currently
                       does not provide sufficient access to OpenDrive information
                       Also not supported is Route. This can be added by checking additional
                       route information
        """

        if position.find('World') is not None:
            world_pos = position.find('World')
            x = float(world_pos.attrib.get('x', 0))
            y = float(world_pos.attrib.get('y', 0))
            z = float(world_pos.attrib.get('z', 0))
            yaw = float(world_pos.attrib.get('h', 0))
            pitch = float(world_pos.attrib.get('p', 0))
            roll = float(world_pos.attrib.get('r', 0))
            return carla.Transform(
                carla.Location(x=x, y=y, z=z),
                carla.Rotation(yaw=yaw, pitch=pitch, roll=roll))
        elif (position.find('RelativeWorld')
              is not None) or (position.find('RelativeObject') is not None):
            rel_pos = (position.find('RelativeWorld')
                       or position.find('RelativeObject'))
            obj = rel_pos.attrib.get('object')
            obj_actor = None

            for actor in CarlaDataProvider.get_world().get_actors():
                if actor.attributes['role_name'] == obj:
                    obj_actor = actor
                    break

            if obj_actor is None:
                raise AttributeError(
                    "Object '{}' provided as position reference is not known".
                    format(obj))

            dx = float(rel_pos.attrib.get('dx', 0))
            dy = float(rel_pos.attrib.get('dy', 0))
            dz = float(rel_pos.attrib.get('dz', 0))

            is_absolute = False
            dyaw = dpitch = droll = 0.0
            if rel_pos.find('Orientation') is not None:
                orientation = rel_pos.find('Orientation')
                is_absolute = (orientation.attrib.get('type') == "absolute")
                dyaw = float(orientation.attrib.get('h', 0))
                dpitch = float(orientation.attrib.get('p', 0))
                droll = float(orientation.attrib.get('r', 0))

            x = obj_actor.get_location().x + dx
            y = obj_actor.get_location().y + dy
            z = obj_actor.get_location().z + dz
            yaw = obj_actor.get_transform().rotation.yaw
            pitch = obj_actor.get_transform().rotation.pitch
            roll = obj_actor.get_transform().rotation.roll
            if not is_absolute:
                yaw = yaw + dyaw
                pitch = pitch + dpitch
                roll = roll + droll
            else:
                yaw = dyaw
                pitch = dpitch
                roll = droll

            return carla.Transform(
                carla.Location(x=x, y=y, z=z),
                carla.Rotation(yaw=yaw, pitch=pitch, roll=roll))
        elif position.find('Road') is not None:
            raise NotImplementedError("Road positions are not yet supported")
        elif position.find('RelativeRoad') is not None:
            raise NotImplementedError(
                "RelativeRoad positions are not yet supported")
        elif position.find('Lane') is not None:
            raise NotImplementedError("Lane positions are not yet supported")
        elif position.find('RelativeLane') is not None:
            raise NotImplementedError(
                "RelativeLane positions are not yet supported")
        elif position.find('Route') is not None:
            raise NotImplementedError("Route positions are not yet supported")
        else:
            raise AttributeError("Unknown position")
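
A short usage sketch for the converter above, feeding it a hand-written World position parsed with xml.etree.ElementTree (the coordinate values are made up, and the call assumes the function is reachable as a plain function):

import xml.etree.ElementTree as ET

position = ET.fromstring(
    '<Position>'
    '  <World x="12.0" y="-4.5" z="0.0" h="90.0" p="0.0" r="0.0"/>'
    '</Position>')
transform = convert_position_to_transform(position)
print(transform.location, transform.rotation)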
    def restart(self):
        self.decision = None
        self.simul_time = time.time()
        self.lane_change_time = time.time()
        self.max_Lane_num = 4
        self.ego_Lane = 2
        self.controller = None
        self.accumulated_reward = 0
        self.section = 0
        self.episode_start = time.time()
        self.pre_max_Lane_num = self.max_Lane_num
        self.index = 0
        print('start destroying actors.')

        if len(self.actor_list) != 0:
            # print('destroying actors.')
            # print("actor 제거 :", self.actor_list)

            for actor in self.actor_list:
                # print("finally 에서 actor 제거 :", self.actor_list)
                if actor.is_alive:
                    actor.destroy()
            print("finshed actor destroy")
            # for x in self.actor_list:
            #     try:
            #         client.apply_batch([carla.command.DestroyActors(x.id)])
            #     except:
            #         continue
        if len(self.extra_list) != 0:
            # client.apply_batch([carla.command.DestoryActors(x.id) for x in self.extra_list])
            # print("extra 제거 :", self.extra_list)
            for x in self.extra_list:
                try:
                    client.apply_batch([carla.command.DestroyActor(x.id)])
                except:
                    continue
            # for extra in self.extra_list:
            #     # print("finally 에서 actor 제거 :", self.extra_list)
            #     print(extra.is_alive)
            #     if extra.is_alive:
            #         extra.destroy()
            print("finshed extra destroy")

            # for x in self.extra_list:
            #     try:
            #         client.apply_batch([carla.command.DestroyActor(x.id)])
            #     except:
            #         continue
        print('finished destroying actors.')

        self.actor_list = []
        self.extra_list = []
        self.ROI_extra_list = []
        self.extra_controller_list = []
        self.extra_dl_list = []
        self.ROI_extra_dl_list = []
        blueprint_library = self.world.get_blueprint_library()
        # start_pose = random.choice(self.map.get_spawn_points())
        start_pose = self.map.get_spawn_points()[102]

        ## display spawn points in the simulation ##
        # print(start_pose)
        # for n, x in enumerate(self.map.get_spawn_points()):
        #     world.debug.draw_string(x.location, 'o', draw_shadow=True,
        #                             color=carla.Color(r=0, g=255, b=255), life_time=30)

        # self.load_traj()

        self.waypoint = self.map.get_waypoint(start_pose.location,
                                              lane_type=carla.LaneType.Driving)
        self.end_point = self.waypoint.next(400)[0].transform.location
        # print(self.waypoint.transform)
        ## display the ego vehicle's global route ##
        world.debug.draw_string(self.waypoint.next(400)[0].transform.location,
                                'o',
                                draw_shadow=True,
                                color=carla.Color(r=0, g=255, b=255),
                                life_time=100)

        # print(start_pose)
        # print(self.waypoint.transform)

        # self.controller = MPCController.Controller

        self.player = world.spawn_actor(
            random.choice(blueprint_library.filter('vehicle.bmw.grandtourer')),
            start_pose)
        # print(self.player.bounding_box) # ego vehicle length

        self.actor_list.append(self.player)

        self.camera_rgb = RGBSensor(self.player, self.hud)
        self.actor_list.append(self.camera_rgb.sensor)

        # self.camera_depth =DepthCamera(self.player, self.hud)
        # self.actor_list.append(self.camera_depth.sensor)

        # self.camera_semseg = SegmentationCamera(self.player,self.hud)
        # self.actor_list.append(self.camera_semseg.sensor)

        self.collision_sensor = CollisionSensor(self.player,
                                                self.hud)  # sensor that detects collisions
        self.actor_list.append(self.collision_sensor.sensor)

        self.lane_invasion_sensor = LaneInvasionSensor(
            self.player, self.hud)  # sensor that detects lane invasions
        self.actor_list.append(self.lane_invasion_sensor.sensor)

        self.gnss_sensor = GnssSensor(self.player)
        self.actor_list.append(self.gnss_sensor.sensor)

        # --------------
        # Spawn Surrounding vehicles
        # --------------

        print("Generate Extra")
        spawn_points = []
        blueprints = self.world.get_blueprint_library().filter('vehicle.*')
        blueprints = [
            x for x in blueprints
            if int(x.get_attribute('number_of_wheels')) == 4
        ]
        # print(*blueprints)
        for i in range(10, 10 * (self.extra_num) + 1, 10):
            dl = random.choice([-1, 0, 1])
            self.extra_dl_list.append(dl)
            spawn_point = None
            if dl == -1:
                spawn_point = self.waypoint.next(
                    i)[0].get_left_lane().transform
            elif dl == 0:
                spawn_point = self.waypoint.next(i)[0].transform
            elif dl == 1:
                spawn_point = self.waypoint.next(
                    i)[0].get_right_lane().transform
            else:
                print("Except ")
            spawn_point = carla.Transform(
                (spawn_point.location + carla.Location(z=1)),
                spawn_point.rotation)
            spawn_points.append(spawn_point)
            # print(blueprint_library.filter('vehicle.bmw.grandtourer'))
            # blueprint = random.choice(blueprint_library.filter('vehicle.bmw.grandtourer'))

            blueprint = random.choice(blueprints)
            # print(blueprint.has_attribute('color'))
            if blueprint.has_attribute('color'):
                # color = random.choice(blueprint.get_attribute('color').recommended_values)
                # print(blueprint.get_attribute('color').recommended_values)
                color = '255,255,255'
                blueprint.set_attribute('color', color)
            extra = self.world.spawn_actor(blueprint, spawn_point)
            self.extra_list.append(extra)
        print('Extra Generation Finished')

        self.spectator.set_transform(
            carla.Transform(
                self.player.get_transform().location + carla.Location(z=100),
                carla.Rotation(pitch=-90)))

        ROI = 1000
        # extra_target_velocity = 10
        port = 8000
        traffic_manager = client.get_trafficmanager(port)
        traffic_manager.set_global_distance_to_leading_vehicle(100.0)
        # traffic_manager.set_synchronous_mode(True)  # disabled due to a std::out_of_range error
        tm_port = traffic_manager.get_port()
        for extra in self.extra_list:
            extra.set_autopilot(True, tm_port)
            # Pure_puresuit_controller(extra, self.waypoint, None, 50)  # km/h
            # target_velocity = 30 #random.randrange(10, 40) # km/h
            # extra.enable_constant_velocity(extra.get_transform().get_right_vector() * target_velocity/3.6)
            traffic_manager.auto_lane_change(extra, False)
        # self.player.set_autopilot(True,tm_port)
        # traffic_manager.auto_lane_change(self.player, False)
        self.controller = Pure_puresuit_controller(self.player, self.waypoint,
                                                   self.extra_list, 70)  # km/h

        # target_velocity = 60 / 3.6
        # forward_vec = self.player.get_transform().get_forward_vector()
        # print(forward_vec)
        # velocity_vec =  target_velocity*forward_vec
        # self.player.set_target_velocity(velocity_vec)
        # print(velocity_vec)

        # print(velocity_vec)
        # client.get_trafficmanager.auto_lane_change(extra, False)
        ###Test####
        # clock = pygame.time.Clock()
        # Keyboardcontrol = KeyboardControl(self, False)
        # display = pygame.display.set_mode(
        #     (self.width, self.height),
        #     pygame.HWSURFACE | pygame.DOUBLEBUF)
        # while True:
        #     if Keyboardcontrol.parse_events(client, self, clock):
        #         return
        #     self.spectator.set_transform(
        #         carla.Transform(self.player.get_transform().location + carla.Location(z=50),
        #                         carla.Rotation(pitch=-90)))
        #     self.camera_rgb.render(display)
        #     self.hud.render(display)
        #     pygame.display.flip()
        #
        #     self.controller.apply_control()
        #     # self.world.wait_for_tick(10.0)
        #     clock.tick(30)
        #
        #     self.hud.tick(self, clock)

        #### Test Finished #####
        #### Test2 #####
        # cnt=0
        # clock = pygame.time.Clock()
        # while True:
        #     # print(self.waypoint.lane_id)
        #     self.spectator.set_transform(
        #         carla.Transform(self.player.get_transform().location + carla.Location(z=100),
        #                         carla.Rotation(pitch=-90)))
        #     cnt += 1
        #     if cnt == 100:
        #         print('executing the forced lane change')
        #         decision = 1
        #         self.controller.apply_control(decision)
        #     else:
        #        self.controller.apply_control()
        #     clock.tick(30)
        #### Test2 Finished #####

        # self.input_size = (self.extra_num)*4 + 1
        self.input_size = 4  # dr dv da dl (likely relative distance, velocity, acceleration, lane offset)
        self.output_size = 3

        # for response in client.apply_batch_sync(batch):
        #     if response.error:
        #         logging.error(response.error)
        #     else:
        #         self.extra_list.append(response.actor_id)
        print(5)
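
The Pure_puresuit_controller used above is not shown in this snippet. A minimal
pure-pursuit steering sketch for a CARLA vehicle (the function name, the wheelbase
value and the 70-degree max steer angle are assumptions, not the author's
implementation; vehicle is a carla.Vehicle and target_location a carla.Location):

import math


def pure_pursuit_steer(vehicle, target_location, wheelbase=2.9):
    # Transform the target point into the vehicle frame.
    tf = vehicle.get_transform()
    dx = target_location.x - tf.location.x
    dy = target_location.y - tf.location.y
    yaw = math.radians(tf.rotation.yaw)
    x_local = math.cos(yaw) * dx + math.sin(yaw) * dy    # forward distance
    y_local = -math.sin(yaw) * dx + math.cos(yaw) * dy   # lateral offset
    lookahead = math.hypot(x_local, y_local)
    if lookahead < 1e-3:
        return 0.0
    # Pure pursuit: delta = atan2(2 * L * sin(alpha), lookahead distance).
    alpha = math.atan2(y_local, x_local)
    delta = math.atan2(2.0 * wheelbase * math.sin(alpha), lookahead)
    # Normalize to CARLA's [-1, 1] steering range (max steer angle assumed ~70 deg).
    return max(-1.0, min(1.0, delta / math.radians(70.0)))

The returned value would then be fed into carla.VehicleControl(steer=...) together
with a throttle term chosen by a separate speed controller.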
Пример #26
0
def scenario(scenario_params, agent_constructor, ctrl_params,
             carla_synchronous, eval_env, eval_stg):
    ego_vehicle = None
    agent = None
    other_vehicles = []
    client, world, carla_map, traffic_manager = carla_synchronous

    try:
        map_reader = NaiveMapQuerier(world, carla_map, debug=True)
        online_config = OnlineConfig(node_type=eval_env.NodeType)

        # Mock vehicles
        spawn_points = carla_map.get_spawn_points()
        blueprints = get_all_vehicle_blueprints(world)
        spawn_indices = [scenario_params.ego_spawn_idx
                         ] + scenario_params.other_spawn_ids
        other_vehicle_ids = []
        for k, spawn_idx in enumerate(spawn_indices):
            if k == 0:
                blueprint = world.get_blueprint_library().find(
                    'vehicle.audi.a2')
            else:
                blueprint = np.random.choice(blueprints)
            spawn_point = spawn_points[spawn_idx]
            spawn_point = shift_spawn_point(carla_map, k,
                                            scenario_params.spawn_shifts,
                                            spawn_point)
            # Prevent collision with road.
            spawn_point.location += carla.Location(0, 0, 0.5)
            vehicle = world.spawn_actor(blueprint, spawn_point)
            if k == 0:
                ego_vehicle = vehicle
            else:
                vehicle.set_autopilot(True, traffic_manager.get_port())
                other_vehicles.append(vehicle)
                other_vehicle_ids.append(vehicle.id)

        world.tick()
        agent = agent_constructor(
            ego_vehicle,
            map_reader,
            other_vehicle_ids,
            eval_stg,
            n_burn_interval=scenario_params.n_burn_interval,
            control_horizon=ctrl_params.control_horizon,
            prediction_horizon=ctrl_params.prediction_horizon,
            n_predictions=ctrl_params.n_predictions,
            n_coincide=ctrl_params.n_coincide,
            scene_builder_cls=TrajectronPlusPlusSceneBuilder,
            log_agent=False,
            log_cplex=False,
            plot_scenario=True,
            plot_simulation=True,
            scene_config=online_config)
        agent.start_sensor()
        assert agent.sensor_is_listening
        if scenario_params.goal:
            agent.set_goal(scenario_params.goal.x,
                           scenario_params.goal.y,
                           is_relative=scenario_params.goal.is_relative)
        """Move the spectator to the ego vehicle.
        The positioning is a little off"""
        state = agent.get_vehicle_state()
        goal = agent.get_goal()
        if goal.is_relative:
            location = carla.Location(x=state[0] + goal.x,
                                      y=state[1] - goal.y,
                                      z=state[2] + 50)
        else:
            location = carla.Location(x=state[0] + (state[0] - goal.x) / 2.,
                                      y=state[1] - (state[1] + goal.y) / 2.,
                                      z=state[2] + 50)
        world.get_spectator().set_transform(
            carla.Transform(location, carla.Rotation(pitch=-90)))

        n_burn_frames = scenario_params.n_burn_interval * online_config.record_interval
        if scenario_params.loop_type == LoopEnum.CLOSED_LOOP:
            run_frames = scenario_params.run_interval * online_config.record_interval
        elif isinstance(agent, OAAgent):
            run_frames = ctrl_params.control_horizon * online_config.record_interval - 1
        else:
            run_frames = ctrl_params.n_coincide * online_config.record_interval - 1
        for idx in range(n_burn_frames + run_frames):
            control = None
            for ctrl in scenario_params.controls:
                if ctrl.interval[0] <= idx and idx <= ctrl.interval[1]:
                    control = ctrl.control
                    break
            frame = world.tick()
            agent.run_step(frame, control=control)

    finally:
        if agent:
            agent.destroy()
        if ego_vehicle:
            ego_vehicle.destroy()
        for other_vehicle in other_vehicles:
            other_vehicle.destroy()

    time.sleep(1)
    def main(self):
        PATH = "/home/a/RL_decision/trained_info.tar"
        print(torch.cuda.get_device_name())
        clock = pygame.time.Clock()
        Keyboardcontrol = KeyboardControl(self, False)
        display = pygame.display.set_mode((self.width, self.height),
                                          pygame.HWSURFACE | pygame.DOUBLEBUF)
        self.agent = decision_driving_Agent(self.input_size, self.output_size,
                                            True, 1000, self.extra_num)
        self.lane_start_point = [
            carla.Location(x=14.905815, y=-135.747452, z=0.000000),
            carla.Location(x=172.745468, y=-364.531799, z=0.000000),
            carla.Location(x=382.441040, y=-212.488907, z=0.000000)
        ]
        self.lane_finished_point = [
            carla.Location(x=14.631096, y=-205.746918, z=0.000000),
            carla.Location(x=232.962860, y=-364.149139, z=0.000000),
            carla.Location(x=376.542816, y=-10.352980, z=0.000000)
        ]
        self.lane_change_point = [
            carla.Location(x=14.905815, y=-135.747452, z=0.000000),
            carla.Location(x=14.631096, y=-205.746918, z=0.000000),
            carla.Location(x=172.745468, y=-364.531799, z=0.000000),
            carla.Location(x=232.962860, y=-364.149139, z=0.000000),
            carla.Location(x=382.441040, y=-212.488907, z=0.000000),
            carla.Location(x=376.542816, y=-10.352980, z=0.000000)
        ]

        self.world.debug.draw_string(carla.Location(x=14.905815,
                                                    y=-135.747452,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        self.world.debug.draw_string(carla.Location(x=14.631096,
                                                    y=-205.746918,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        # ---------------------------#

        self.world.debug.draw_string(carla.Location(x=172.745468,
                                                    y=-364.531799,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        self.world.debug.draw_string(carla.Location(x=232.962860,
                                                    y=-364.149139,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        # ---------------------------#
        self.world.debug.draw_string(carla.Location(x=376.542816,
                                                    y=-10.352980,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        self.world.debug.draw_string(carla.Location(x=382.441040,
                                                    y=-212.488907,
                                                    z=0.000000),
                                     'o',
                                     draw_shadow=True,
                                     color=carla.Color(r=0, g=255, b=0),
                                     life_time=9999)
        lane_distance_between_points = []
        for i in range(len(self.lane_finished_point)):
            lane_distance_between_points.append(
                (self.lane_start_point[i].x -
                 self.lane_finished_point[i].x)**2 +
                (self.lane_start_point[i].y -
                 self.lane_finished_point[i].y)**2 +
                (self.lane_start_point[i].z -
                 self.lane_finished_point[i].z)**2)

        epoch_init = 0
        # if(os.path.exists(PATH)):
        #     print("저장된 가중치 불러옴")
        #     checkpoint = torch.load(PATH)
        #
        #     self.agent.model.load_state_dict(checkpoint['model_state_dict'])
        #     device = torch.device('cuda')
        #     self.agent.model.to(device)
        #     self.agent.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        #     self.agent.buffer = checkpoint['memorybuffer']
        #     epoch_init = checkpoint['epoch']

        # self.agent.buffer.load_state_dict(self.save_dir['memorybuffer'])

        # self.is_first_time = False
        # controller = VehicleControl(self, True)
        # self.player.apply_control(carla.Vehicle
        # Control(throttle=1.0, steer=0.0, brake=0.0))
        # vehicles = self.world.get_actors().filter('vehicle.*')
        # for i in vehicles:
        #     print(i.id)
        try:
            n_iters = 150
            is_error = 0

            for epoch in range(epoch_init, n_iters):
                # cnt = 0
                first_time_print = True

                [state, x_static] = self.get_next_state()  # initialize the initial state s0
                while (True):
                    if Keyboardcontrol.parse_events(client, self, clock):
                        return

                    self.camera_rgb.render(display)
                    self.hud.render(display)
                    pygame.display.flip()

                    self.spectator.set_transform(
                        carla.Transform(
                            self.player.get_transform().location +
                            carla.Location(z=100), carla.Rotation(pitch=-90)))

                    ## Get max lane ##
                    if self.section < len(lane_distance_between_points):
                        self.lane_distance_between_start = (
                            (self.player.get_transform().location.x -
                             self.lane_start_point[self.section].x)**2 +
                            (self.player.get_transform().location.y -
                             self.lane_start_point[self.section].y)**2)
                        self.lane_distance_between_end = (
                            (self.player.get_transform().location.x -
                             self.lane_finished_point[self.section].x)**2 +
                            (self.player.get_transform().location.y -
                             self.lane_finished_point[self.section].y)**2)

                        # print("self.lane_distance_between_start : ",self.lane_distance_between_start,"self.lane_distance_between_end :",self.lane_distance_between_end, "lane_distance_between_points[self.section]",lane_distance_between_points[self.section],"self.section :", self.section)
                        if max(lane_distance_between_points[self.section], self.lane_distance_between_start,
                               self.lane_distance_between_end) == \
                                lane_distance_between_points[self.section]:
                            self.max_Lane_num = 3
                            # world.debug.draw_string(self.player.get_transform().location, 'o', draw_shadow = True,
                            #                                 color = carla.Color(r=255, g=255, b=0), life_time = 999)

                        elif max(lane_distance_between_points[self.section], self.lane_distance_between_start,
                                 self.lane_distance_between_end) == \
                                self.lane_distance_between_start and self.max_Lane_num == 3:
                            self.section += 1
                            self.max_Lane_num = 4
                            if self.section >= len(lane_distance_between_points
                                                   ):  # when, section_num = 3
                                self.section = 0
                    ## finished getting max lane ##
                    ## visualize when max lane == 3 ##
                    if self.max_Lane_num == 3:
                        self.world.debug.draw_string(
                            self.waypoint.transform.location,
                            'o',
                            draw_shadow=True,
                            color=carla.Color(r=255, g=255, b=255),
                            life_time=9999)
                    # [self.extra_list, self.extra_dl_list] = self.agent.search_extra_in_ROI(self.extra_list,self.player,self.extra_dl_list)

                    ## finished visualizing ##

                    if self.agent.is_training:
                        ## DQN procedure ##
                        # initialize weights (done internally by PyTorch)
                        # epsilon-greedy action exploration      - (act function)
                        # obtain an MDP tuple for the buffer     - (step function)
                        # store the MDP tuple in the buffer      -
                        # estimate the optimal Q value           - (learning function)
                        # compute the loss                       -
                        # update the weights                     -

                        if epoch % 10 == 0:
                            # [w, b] = self.agent.model.parameters()  # unpack parameters
                            self.save_dir = torch.save(
                                {
                                    'epoch':
                                    epoch,
                                    'model_state_dict':
                                    self.agent.model.state_dict(),
                                    'optimizer_state_dict':
                                    self.agent.optimizer.state_dict(),
                                    'memorybuffer':
                                    self.agent.buffer
                                }, PATH)
                        # print(clock.get_fps())
                        # if  time.time() - self.simul_time >10 and clock.get_fps()  < 15:
                        #     self.restart()
                        #     time.sleep(3)
                        #     continue
                        if self.decision is not None:
                            [next_state, next_x_static] = self.get_next_state()
                            sample = [
                                state, x_static, self.decision, reward,
                                next_state, next_x_static, done
                            ]
                            self.agent.buffer.append(sample)

                        # print("befor act")
                        self.decision = self.agent.act(state, x_static)
                        # print("after act")

                        # print(decision)
                        # if self.decision ==1 and self.max_Lane_num==self.ego_Lane:
                        #     print( " ")
                        # print("befroe safte check/ 판단 :", self.decision, "최대 차선 :", self.max_Lane_num, "ego lane :",self.ego_Lane)

                        self.decision = self.safety_check(self.decision)
                        # print("판단 :", self.decision, "최대 차선 :", self.max_Lane_num, "ego lane :",self.ego_Lane)

                        # print("before")
                        is_error = self.controller.apply_control(self.decision)
                        # print("after")

                        # print("extra_controller 개수 :", len(self.extra_controller_list))
                        # for i in range(len(self.extra_controller_list)):
                        #     self.extra_controller_list[i].apply_control()

                        # print("before step")
                        [state, x_static, decision, reward, __, _,
                         done] = self.step(self.decision)
                        # print("after step")

                        if done:
                            print("epsilon :", self.agent.epsilon)
                            print("epoch : ", epoch, "누적 보상 : ",
                                  self.accumulated_reward)
                            writer.add_scalar('누적 보상 ',
                                              self.accumulated_reward, epoch)
                            if self.agent.buffer.size() > self.agent.batch_size:
                                # print("start learning")
                                for i in range(300):
                                    self.agent.learning()
                            # print("finished learning")
                            client.set_timeout(10)
                            self.restart()
                            break

                    else:
                        self.agent.act(state, x_static)

                    clock.tick(40)
                    self.hud.tick(self, clock)

        finally:
            print('\ndestroying %d vehicles' % len(self.extra_list))
            # client.apply_batch([carla.command.DestroyActor(x) for x in self.extra_list])

            print('destroying actors.')
            # client.apply_batch([carla.command.DestroyActors(x.id) for x in self.actor_list])
            # client.apply_batch([carla.command.DestroyActors(x.id) for x in self.extra_list])
            for actor in self.actor_list:
                # print("finally 에서 actor 제거 :", self.actor_list)
                if actor.is_alive:
                    actor.destroy()
            for extra in self.extra_list:
                # print("finally 에서 actor 제거 :", self.extra_list)
                if extra.is_alive:
                    extra.destroy()

            # pygame.quit()
            print('done.')
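
The training loop above calls decision_driving_Agent.act(), .learning() and its
replay buffer, none of which appear in this snippet (the author's agent also takes
an extra x_static input, omitted here). A minimal epsilon-greedy DQN sketch of that
interface; the class name, network shape and buffer layout are assumptions, not the
author's actual implementation:

import random
from collections import deque

import torch
import torch.nn as nn


class MinimalDQNAgent:
    def __init__(self, input_size, output_size, epsilon=1.0, batch_size=32):
        self.model = nn.Sequential(nn.Linear(input_size, 64), nn.ReLU(),
                                   nn.Linear(64, output_size))
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3)
        self.buffer = deque(maxlen=10000)  # (state, action, reward, next_state, done)
        self.epsilon = epsilon
        self.batch_size = batch_size
        self.output_size = output_size

    def act(self, state):
        # Epsilon-greedy action selection over Q(state, .).
        if random.random() < self.epsilon:
            return random.randrange(self.output_size)
        with torch.no_grad():
            q = self.model(torch.as_tensor(state, dtype=torch.float32))
        return int(q.argmax().item())

    def learning(self, gamma=0.99):
        # One TD(0) update on a random minibatch; states are flat float lists.
        if len(self.buffer) < self.batch_size:
            return
        batch = random.sample(self.buffer, self.batch_size)
        s, a, r, s2, d = (torch.as_tensor(x, dtype=torch.float32)
                          for x in zip(*batch))
        q = self.model(s).gather(1, a.long().unsqueeze(1)).squeeze(1)
        with torch.no_grad():
            target = r + gamma * self.model(s2).max(1).values * (1 - d)
        loss = nn.functional.mse_loss(q, target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()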
Пример #28
0
    # The head of this helper was truncated in the source dump; reconstructed below
    # assuming matplotlib.patches.Rectangle and a box given as ((x0, y0), (x1, y1)).
    rect = patches.Rectangle(
      (int(box[0][0]), int(box[0][1])),  # (x, y) corner
      int(box[1][0] - box[0][0]),  # width
      int(box[1][1] - box[0][1]),  # height
      linewidth=1, edgecolor='r', facecolor='none')
    PLT_IMG_AX.add_patch(rect)
print('successfully connected to Carla')
world = client.get_world()
settings = world.get_settings()
world.apply_settings(settings)
map = world.get_map()
bp = world.get_blueprint_library().find('sensor.camera.rgb')
bp.set_attribute('image_size_x', str(IM_WIDTH))
bp.set_attribute('image_size_y', str(IM_HEIGHT))
bp.set_attribute('fov', '110')
# Set the time in seconds between sensor captures
bp.set_attribute('sensor_tick', '0.2')
transform = carla.Transform(carla.Location(x=-65.0, y=3.0, z=6.0), carla.Rotation(yaw=180.0, pitch=-30.0))
camera = world.spawn_actor(bp, transform)
camera.listen(processImg)
dbp = world.get_blueprint_library().find('sensor.camera.depth')
dbp.set_attribute('image_size_x', str(IM_WIDTH))
dbp.set_attribute('image_size_y', str(IM_HEIGHT))
dbp.set_attribute('fov', '110')
dcamera = world.spawn_actor(dbp, transform)
dcamera.listen(processDepthImg)
_, PLT_IMG_AX = plt.subplots(1)
ani = FuncAnimation(plt.gcf(), imgUpdate, interval=50)
plt.show()

# image.raw_data

# # Default format (depends on the camera PostProcessing but always a numpy array).
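
The processImg and processDepthImg callbacks registered above are not included in
this snippet, and the trailing comments only hint at image.raw_data. A minimal
sketch of what the RGB callback might look like (the callback name matches the
listen() call above, but the global frame buffer is an assumption; CARLA delivers
raw_data as a flat BGRA uint8 buffer):

import numpy as np

LATEST_FRAME = None  # assumed global that imgUpdate() would read


def processImg(image):
    global LATEST_FRAME
    # Reshape the flat BGRA byte buffer into (height, width, 4) ...
    array = np.frombuffer(image.raw_data, dtype=np.uint8)
    array = np.reshape(array, (image.height, image.width, 4))
    # ... then drop alpha and flip BGR -> RGB for matplotlib display.
    LATEST_FRAME = array[:, :, :3][:, :, ::-1]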
Пример #29
0
    def __init__(self, *args, **kwargs):  #parent_actor, hud, gamma_correction
        self.sensor = None
        self.surface = None
        self._parent = args[0]
        self.hud = args[1]
        self.recording = False
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        Attachment = carla.AttachmentType
        self._camera_transforms = [
            (carla.Transform(carla.Location(x=-5.5, z=2.5),
                             carla.Rotation(pitch=8.0)), Attachment.SpringArm),
            (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),
            (carla.Transform(carla.Location(x=5.5, y=1.5,
                                            z=1.5)), Attachment.SpringArm),
            (carla.Transform(carla.Location(x=-8.0, z=6.0),
                             carla.Rotation(pitch=6.0)), Attachment.SpringArm),
            (carla.Transform(carla.Location(x=-1, y=-bound_y,
                                            z=0.5)), Attachment.Rigid)
        ]
        self.transform_index = 1
        self.sensors = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],
            ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],
            [
                'sensor.camera.depth', cc.LogarithmicDepth,
                'Camera Depth (Logarithmic Gray Scale)', {}
            ],
            [
                'sensor.camera.semantic_segmentation', cc.Raw,
                'Camera Semantic Segmentation (Raw)', {}
            ],
            [
                'sensor.camera.semantic_segmentation', cc.CityScapesPalette,
                'Camera Semantic Segmentation (CityScapes Palette)', {}
            ],
            [
                'sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', {
                    'range': '50'
                }
            ], ['sensor.camera.dvs', cc.Raw, 'Dynamic Vision Sensor', {}],
            [
                'sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted', {
                    'lens_circle_multiplier': '3.0',
                    'lens_circle_falloff': '3.0',
                    'chromatic_aberration_intensity': '0.5',
                    'chromatic_aberration_offset': '0'
                }
            ]
        ]
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(self.hud.dim[0]))
                bp.set_attribute('image_size_y', str(self.hud.dim[1]))
                if bp.has_attribute('gamma'):
                    bp.set_attribute('gamma', str(args[2]))
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
            elif item[0].startswith('sensor.lidar'):
                self.lidar_range = 50

                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
                    if attr_name == 'range':
                        self.lidar_range = float(attr_value)

            item.append(bp)
        self.index = None
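
The __init__ above only prepares the blueprint list; a minimal sketch of how a
camera-manager-style class typically spawns and listens to the selected sensor
(the method name and the _parse_image callback are assumptions, loosely following
the CARLA manual_control.py pattern this snippet appears to be based on):

    def set_sensor(self, index):
        # Destroy any previously spawned sensor before switching.
        if self.sensor is not None:
            self.sensor.destroy()
            self.surface = None
        blueprint = self.sensors[index][-1]  # bp appended at the end of __init__
        transform, attachment = self._camera_transforms[self.transform_index]
        self.sensor = self._parent.get_world().spawn_actor(
            blueprint,
            transform,
            attach_to=self._parent,
            attachment_type=attachment)
        # Each new frame is delivered asynchronously through this callback.
        self.sensor.listen(lambda image: self._parse_image(image))
        self.index = index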
Пример #30
0
# ]

# ego_transforms = [
#     carla.Transform(carla.Location(x=187.220924, y=198.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=187.220924, y=195.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=187.220924, y=201.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=182.220924, y=198.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=182.220924, y=195.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=182.220924, y=201.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=192.220924, y=198.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=192.220924, y=195.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
#     carla.Transform(carla.Location(x=192.220924, y=201.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578))
# ]

ego_transforms = [
    carla.Transform(carla.Location(x=187.220924, y=198.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
    # carla.Transform(carla.Location(x=167.220924, y=195.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
    # carla.Transform(carla.Location(x=167.220924, y=201.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
    # carla.Transform(carla.Location(x=162.220924, y=195.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
    # carla.Transform(carla.Location(x=162.220924, y=201.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578))
    # carla.Transform(carla.Location(x=192.220924, y=195.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578)),
    # carla.Transform(carla.Location(x=192.220924, y=201.343231, z=3.553675), carla.Rotation(pitch=-1.277402, yaw=-179.359268, roll=-0.017578))
]

def process_point_cloud(args, point_cloud_carla, save_lidar_data):
    if save_lidar_data:
        point_cloud_carla.save_to_disk(args.data_dir + '/lidar' +'/%.6d.ply' % point_cloud_carla.frame)
    
    # Also keep a numpy copy for later use.
    # Each raw lidar point is 4 float32 values: x, y, z, intensity.
    pcd = np.copy(np.frombuffer(point_cloud_carla.raw_data, dtype=np.dtype('float32')))
    pcd = np.reshape(pcd, (int(pcd.shape[0] / 4), 4))