Example #1
def make_carla_settings(args):
    """Make a CarlaSettings object with the settings we need.
    """
    settings = CarlaSettings()

    settings.set(SynchronousMode=True,
                 SendNonPlayerAgentsInfo=False,
                 NumberOfVehicles=65,
                 NumberOfPedestrians=10,
                 WeatherId=1)
    # settings.randomize_seeds()  # uncomment to get different random settings
    # Initialize the depth and RGB cameras.
    camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
    camera1.set_image_size(*image_size)
    camera1.set_position(*camera_local_pos)
    camera1.set_rotation(*camera_local_rotation)
    settings.add_sensor(camera1)

    camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
    camera2.set_image_size(*image_size)
    camera2.set_position(*camera_local_pos)
    camera2.set_rotation(*camera_local_rotation)
    settings.add_sensor(camera2)

    return settings, camera2
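
# A minimal usage sketch (assuming a running CARLA 0.8 server and the
# carla.client.make_carla_client helper used in the later examples):
#
#     with make_carla_client(args.host, args.port) as client:
#         settings, rgb_camera = make_carla_settings(args)
#         scene = client.load_settings(settings)
#         client.start_episode(0)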
Example #2
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(200, 88)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                conditions.set(SynchronousMode=True)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #3
    def client_init(self, client):

        self.client = client

        self.carla_settings = CarlaSettings()
        self.carla_settings.set(SynchronousMode=True,
                                SendNonPlayerAgentsInfo=True,
                                NumberOfVehicles=20,
                                NumberOfPedestrians=40,
                                QualityLevel='Low',
                                )

        camera0 = Camera('CameraRGB')
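        # NOTE: set_image_size expects (width, height); passing self.HEIGHT
        # first here (and for camera1 below) looks like transposed arguments.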
        camera0.set_image_size(self.HEIGHT, self.WIDTH)
        camera0.set_position(2.0, 0.0, 1.4)
        camera0.set_rotation(0.0, 0.0, 0.0)
        camera1 = Camera('CameraSegmentation', PostProcessing='SemanticSegmentation')
        camera1.set_image_size(self.HEIGHT, self.WIDTH)
        camera1.set_position(2.0, 0, 1.4)

        # Attach the sensors to the configured settings object so that the
        # synchronous-mode and agent options set above are actually sent.
        self.settings = self.carla_settings
        self.settings.add_sensor(camera0)
        self.settings.add_sensor(camera1)

        scene = self.client.load_settings(self.settings)
        start_spots = scene.player_start_spots
        self.target = start_spots[26]
        self.player_start = 26
        cv2.namedWindow('im', cv2.WINDOW_NORMAL)
Example #4
    def _build_camera(self, name, post):
        camera = Camera(name, PostProcessing=post)
        camera.set_image_size(
            self.config["render_x_res"], self.config["render_y_res"])
        camera.set_position(x=2.4, y=0, z=0.8)
        camera.set_rotation(pitch=-40, roll=0, yaw=0)
        # camera.FOV = 100
        return camera
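    # Hypothetical usage (names assumed, not from the original source):
    #   rgb_camera = self._build_camera('CameraRGB', 'SceneFinal')
    #   depth_camera = self._build_camera('CameraDepth', 'Depth')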
Example #5
    def _add_sensors(self):
        camera0 = Camera('RenderCamera0')
        # Set image resolution in pixels.
        camera0.set_image_size(800, 600)
        # Set its position relative to the car in meters.
        camera0.set_position(-4.30, 0, 2.60)
        camera0.set_rotation(pitch=-25, yaw=0, roll=0)

        self.settings.add_sensor(camera0)
Example #6
    def build_experiments(self):
        """
            Creates the whole set of experiment objects,
            The experiments created depends on the selected Town.

        """

        # We check the town and define the town-related parameters
        # accordingly. Each vector's length equals the number of tasks, and
        # each task holds one or more poses (start and end positions).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]
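        # For instance, in Town01 poses_tasks[0] == [[7, 3]]: task 0 drives a
        # single pose from start spot 7 to goal spot 3, with no extra agents.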

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather

                )
                # Add all the cameras that were set for this experiment
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #7
    def build_experiments(self):
        """
            Creates the whole set of experiment objects,
            The experiments created depends on the selected Town.

        """

        # We check the town and define the town-related parameters
        # accordingly. Each vector's length equals the number of tasks, and
        # each task holds one or more poses (start and end positions).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather

                )
                # Add all the cameras that were set for this experiment
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #8
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather
                )
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #9
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [25]
            pedestrians_tasks = [25]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 25]
            pedestrians_tasks = [0, 25]

        experiments_vector = []
        for weather in self.weathers:
            for scenario in range(len(vehicles_tasks)):
                for iteration in range(len(poses_tasks)):
                    poses = poses_tasks[iteration]
                    vehicles = vehicles_tasks[scenario]
                    pedestrians = pedestrians_tasks[scenario]

                    conditions = CarlaSettings()
                    conditions.set(SendNonPlayerAgentsInfo=True,
                                   NumberOfVehicles=vehicles,
                                   NumberOfPedestrians=pedestrians,
                                   WeatherId=weather)
                    # Add all the cameras that were set for this experiment

                    conditions.add_sensor(camera)

                    experiment = Experiment()
                    experiment.set(Conditions=conditions,
                                   Poses=poses,
                                   Task=iteration,
                                   Repetitions=1)
                    experiments_vector.append(experiment)

        return experiments_vector
Example #10
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('rgb')
        camera.set(FOV=90)
        camera.set_image_size(300, 300)
        camera.set_position(0.2, 0.0, 0.85)
        camera.set_rotation(-3.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [0, 0, 0, 15]
        pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather,
                    QualityLevel='Epic'
                )
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #11
    def __init__(self,
                 client,
                 frame_skip=1,
                 cam_width=800,
                 cam_height=600,
                 town_string='Town01'):
        super(StraightDriveEnv, self).__init__()
        self.frame_skip = frame_skip
        self.client = client
        self._planner = Planner(town_string)

        camera0 = Camera('CameraRGB')
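        # NOTE: set_image_size expects (width, height), so the
        # (cam_height, cam_width) call below looks transposed. The position
        # and FOV values here follow the centimeter-based convention of the
        # early (0.7-era) CARLA client API.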
        camera0.set(CameraFOV=100)
        camera0.set_image_size(cam_height, cam_width)
        camera0.set_position(200, 0, 140)
        camera0.set_rotation(-15.0, 0, 0)

        self.start_goal_pairs = [
            [36, 40], [39, 35], [110, 114], [7, 3], [0, 4],
            [68, 50], [61, 59], [47, 64], [147, 90],
            [33, 87], [26, 19], [80, 76], [45, 49],
            [55, 44], [29, 107], [95, 104], [84, 34],
            [53, 67], [22, 17], [91, 148], [20, 107],
            [78, 70], [95, 102], [68, 44], [45, 69]
        ]

        vehicles = 0
        pedestrians = 0
        weather = 1

        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=vehicles,
            NumberOfPedestrians=pedestrians,
            WeatherId=weather,
        )

        settings.randomize_seeds()
        settings.add_sensor(camera0)

        self.scene = self.client.load_settings(settings)

        img_shape = (cam_width, cam_height, 3)
        self.observation_space = spaces.Tuple(
            (spaces.Box(-np.inf, np.inf, (3, )), spaces.Box(0, 255,
                                                            img_shape)))
        self.action_space = spaces.Box(-np.inf, np.inf, shape=(3, ))
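        # Observation: a (state, image) tuple of a 3-vector plus an RGB
        # frame; action: an unbounded 3-vector (presumably steer, throttle,
        # brake).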

        self.prev_state = np.array([0., 0., 0.])
        self.prev_collisions = np.array([0., 0., 0.])
        self.prev_intersections = np.array([0., 0.])
Example #12
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('rgb')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [0, 15, 70]
        pedestrians_tasks = [0, 50, 150]

        task_names = ['empty', 'normal', 'cluttered']

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                conditions.set(DisableTwoWheeledVehicles=True)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               TaskName=task_names[iteration],
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #13
    def add_cameras(self, settings, cameras, camera_width, camera_height):
        # add a front facing camera
        print("Available Cameras are ", cameras)
        if CameraTypes.FRONT in cameras:
            camera = Camera(CameraTypes.FRONT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 0, 0)
            settings.add_sensor(camera)

        # add a left facing camera
        if CameraTypes.LEFT in cameras:
            camera = Camera(CameraTypes.LEFT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, -30, 0)
            settings.add_sensor(camera)

        # add a right facing camera
        if CameraTypes.RIGHT in cameras:
            camera = Camera(CameraTypes.RIGHT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 30, 0)
            settings.add_sensor(camera)

        # add a front facing depth camera
        if CameraTypes.DEPTH in cameras:
            camera = Camera(CameraTypes.DEPTH.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'Depth'
            settings.add_sensor(camera)

        # add a front facing semantic segmentation camera
        if CameraTypes.SEGMENTATION in cameras:
            camera = Camera(CameraTypes.SEGMENTATION.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'SemanticSegmentation'
            settings.add_sensor(camera)
            print("Successfully adding a SemanticSegmentation camera")

        return settings
Example #14
    def _add_cameras(self, settings, cameras, camera_width, camera_height):
        # add a front facing camera
        if CameraTypes.FRONT in cameras:
            camera = Camera(CameraTypes.FRONT.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 0, 0)
            settings.add_sensor(camera)

        # add a left facing camera
        if CameraTypes.LEFT in cameras:
            camera = Camera(CameraTypes.LEFT.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, -30, 0)
            settings.add_sensor(camera)

        # add a right facing camera
        if CameraTypes.RIGHT in cameras:
            camera = Camera(CameraTypes.RIGHT.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            settings.add_sensor(camera)

        # add a front facing depth camera
        if CameraTypes.DEPTH in cameras:
            camera = Camera(CameraTypes.DEPTH.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'Depth'
            settings.add_sensor(camera)

        # add a front facing semantic segmentation camera
        if CameraTypes.SEGMENTATION in cameras:
            camera = Camera(CameraTypes.SEGMENTATION.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'SemanticSegmentation'
            settings.add_sensor(camera)

        return settings
Example #15
def run_carla_client(args):
    # Here we will run 15 episodes with 1230 frames each.
    number_of_episodes = 15
    frames_per_episode = 1230
    # Vehicle count for each of the 15 episodes (indexed 0-14).
    vehicles_num = [
        140, 100, 120, 80, 90, 100, 80, 90, 110, 100, 80, 70, 70, 80, 100
    ]

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=False,
                    NumberOfVehicles=vehicles_num[episode],
                    NumberOfPedestrians=120,
                    DisableTwoWheeledVehicles=False,
                    WeatherId=episode,
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # LEFT RGB CAMERA
                camera_l = Camera('LeftCameraRGB', PostProcessing='SceneFinal')
                camera_l.set_image_size(800, 600)
                camera_l.set_position(1.30, -0.27, 1.50)
                settings.add_sensor(camera_l)

                # LEFT DEPTH
                camera_ld = Camera('LeftCameraDepth', PostProcessing='Depth')
                camera_ld.set_image_size(800, 600)
                camera_ld.set_position(1.30, -0.27, 1.50)
                settings.add_sensor(camera_ld)

                # LEFT SEGMENTATION
                camera_ls = Camera('LeftCameraSeg',
                                   PostProcessing='SemanticSegmentation')
                camera_ls.set_image_size(800, 600)
                camera_ls.set_position(1.30, -0.27, 1.50)
                settings.add_sensor(camera_ls)

                # RIGHT RGB CAMERA
                camera_r = Camera('RightCameraRGB',
                                  PostProcessing='SceneFinal')
                camera_r.set_image_size(800, 600)
                camera_r.set_position(1.30, 0.27, 1.50)
                settings.add_sensor(camera_r)

                # RIGHT DEPTH
                camera_rd = Camera('RightCameraDepth', PostProcessing='Depth')
                camera_rd.set_image_size(800, 600)
                camera_rd.set_position(1.30, 0.27, 1.50)
                settings.add_sensor(camera_rd)

                # RIGHT SEGMENTATION
                camera_rs = Camera('RightCameraSeg',
                                   PostProcessing='SemanticSegmentation')
                camera_rs.set_image_size(800, 600)
                camera_rs.set_position(1.30, 0.27, 1.50)
                settings.add_sensor(camera_rs)

                # LEFT 15 DEGREE RGB CAMERA
                camera_l_15 = Camera('15_LeftCameraRGB',
                                     PostProcessing='SceneFinal')
                camera_l_15.set_image_size(800, 600)
                camera_l_15.set_position(1.30, -0.7, 1.50)  # [X, Y, Z]
                camera_l_15.set_rotation(0, -15.0,
                                         0)  # [pitch(Y), yaw(Z), roll(X)]
                settings.add_sensor(camera_l_15)

                # LEFT 15 DEGREE DEPTH
                camera_ld_15 = Camera('15_LeftCameraDepth',
                                      PostProcessing='Depth')
                camera_ld_15.set_image_size(800, 600)
                camera_ld_15.set_position(1.30, -0.7, 1.50)
                camera_ld_15.set_rotation(0, -15.0, 0)
                settings.add_sensor(camera_ld_15)

                # LEFT 15 DEGREE SEGMENTATION
                camera_ls_15 = Camera('15_LeftCameraSeg',
                                      PostProcessing='SemanticSegmentation')
                camera_ls_15.set_image_size(800, 600)
                camera_ls_15.set_position(1.30, -0.7, 1.50)
                camera_ls_15.set_rotation(0, -15.0, 0)
                settings.add_sensor(camera_ls_15)

                # Right 15 DEGREE RGB CAMERA
                camera_r_15 = Camera('15_RightCameraRGB',
                                     PostProcessing='SceneFinal')
                camera_r_15.set_image_size(800, 600)
                camera_r_15.set_position(1.30, 0.7, 1.50)
                camera_r_15.set_rotation(0, 15.0, 0)
                settings.add_sensor(camera_r_15)

                # Right 15 DEGREE DEPTH
                camera_rd_15 = Camera('15_RightCameraDepth',
                                      PostProcessing='Depth')
                camera_rd_15.set_image_size(800, 600)
                camera_rd_15.set_position(1.30, 0.7, 1.50)
                camera_rd_15.set_rotation(0, 15.0, 0)
                settings.add_sensor(camera_rd_15)

                # RIGHT 15 DEGREE SEGMENTATION
                camera_rs_15 = Camera('15_RightCameraSeg',
                                      PostProcessing='SemanticSegmentation')
                camera_rs_15.set_image_size(800, 600)
                camera_rs_15.set_position(1.30, 0.7, 1.50)
                camera_rs_15.set_rotation(0, 15.0, 0)
                settings.add_sensor(camera_rs_15)

                # LEFT 30 DEGREE RGB CAMERA
                camera_l_30 = Camera('30_LeftCameraRGB',
                                     PostProcessing='SceneFinal')
                camera_l_30.set_image_size(800, 600)
                camera_l_30.set_position(1.30, -0.7, 1.50)
                camera_l_30.set_rotation(0, -30.0, 0)
                settings.add_sensor(camera_l_30)

                # LEFT 30 DEGREE DEPTH
                camera_ld_30 = Camera('30_LeftCameraDepth',
                                      PostProcessing='Depth')
                camera_ld_30.set_image_size(800, 600)
                camera_ld_30.set_position(1.30, -0.7, 1.50)
                camera_ld_30.set_rotation(0, -30.0, 0)
                settings.add_sensor(camera_ld_30)

                # LEFT 30 DEGREE SEGMENTATION
                camera_ls_30 = Camera('30_LeftCameraSeg',
                                      PostProcessing='SemanticSegmentation')
                camera_ls_30.set_image_size(800, 600)
                camera_ls_30.set_position(1.30, -0.7, 1.50)
                camera_ls_30.set_rotation(0, -30.0, 0)
                settings.add_sensor(camera_ls_30)

                # RIGHT 30 DEGREE RGB CAMERA
                camera_r_30 = Camera('30_RightCameraRGB',
                                     PostProcessing='SceneFinal')
                camera_r_30.set_image_size(800, 600)
                camera_r_30.set_position(1.30, 0.7, 1.50)
                camera_r_30.set_rotation(0, 30.0, 0)
                settings.add_sensor(camera_r_30)

                # RIGHT 30 DEGREE DEPTH
                camera_rd_30 = Camera('30_RightCameraDepth',
                                      PostProcessing='Depth')
                camera_rd_30.set_image_size(800, 600)
                camera_rd_30.set_position(1.30, 0.7, 1.50)
                camera_rd_30.set_rotation(0, 30.0, 0)
                settings.add_sensor(camera_rd_30)

                # RIGHT 30 DEGREE SEGMENTATION
                camera_rs_30 = Camera('30_RightCameraSeg',
                                      PostProcessing='SemanticSegmentation')
                camera_rs_30.set_image_size(800, 600)
                camera_rs_30.set_position(1.30, 0.7, 1.50)
                camera_rs_30.set_rotation(0, 30.0, 0)
                settings.add_sensor(camera_rs_30)
            else:
                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            camera_l_to_car_transform = camera_l.get_unreal_transform()
            camera_r_to_car_transform = camera_r.get_unreal_transform()
            camera_l_15_to_car_transform = camera_l_15.get_unreal_transform()
            camera_r_15_to_car_transform = camera_r_15.get_unreal_transform()
            camera_l_30_to_car_transform = camera_l_30.get_unreal_transform()
            camera_r_30_to_car_transform = camera_r_30.get_unreal_transform()

            if not os.path.isdir(args.dataset_path +
                                 "/episode_{:0>4d}".format(episode)):
                os.makedirs(args.dataset_path +
                            "/episode_{:0>4d}".format(episode))
            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                # print_measurements(measurements)

                world_transform = Transform(
                    measurements.player_measurements.transform)

                # Compute the final transformation matrix.
                camera_l_to_world_transform = world_transform * camera_l_to_car_transform
                camera_r_to_world_transform = world_transform * camera_r_to_car_transform
                camera_l_15_to_world_transform = world_transform * camera_l_15_to_car_transform
                camera_r_15_to_world_transform = world_transform * camera_r_15_to_car_transform
                camera_l_30_to_world_transform = world_transform * camera_l_30_to_car_transform
                camera_r_30_to_world_transform = world_transform * camera_r_30_to_car_transform
                args.out_filename_format = args.dataset_path + '/episode_{:0>4d}/{:s}/{:0>6d}'
                # Save the images to disk if requested.
                if frame >= 30:
                    if args.save_images_to_disk:
                        for name, measurement in sensor_data.items():
                            filename = args.out_filename_format.format(
                                episode, name, frame - 30)
                            measurement.save_to_disk(filename)

                        # Save the 3x4 world-transform matrix of each camera
                        # to its own text file, one row per frame.
                        camera_transforms = [
                            ('LeftCamera', camera_l_to_world_transform),
                            ('RightCamera', camera_r_to_world_transform),
                            ('15_LeftCamera', camera_l_15_to_world_transform),
                            ('15_RightCamera', camera_r_15_to_world_transform),
                            ('30_LeftCamera', camera_l_30_to_world_transform),
                            ('30_RightCamera', camera_r_30_to_world_transform),
                        ]
                        for cam_name, transform in camera_transforms:
                            filename = "{}/episode_{:0>4d}/{}.txt".format(
                                args.dataset_path, episode, cam_name)
                            values = np.asarray(
                                transform.matrix[:3, :]).reshape(-1)
                            row = " ".join("{:.8e}".format(x) for x in values)
                            with open(filename, 'a') as transform_file:
                                transform_file.write(row + "\n")

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #
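                #
                # Since the depth is normalized to [0, 1], multiplying by the
                # far-plane distance (1000 m in CARLA 0.8) would give meters:
                #
                #     depth_in_meters = 1000.0 * depth_array[Y, X]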

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would apply this
                    # frame. We can enable autopilot by sending this control
                    # back, optionally modified; uncommenting the line below,
                    # for instance, would add noise to the steering.

                    control = measurements.player_measurements.autopilot_control
                    # control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)

                #time.sleep(1)

Example #16
def run(host, port):
    number_of_episodes = args.episodes
    frames_per_episode = args.frames

    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        for batch_no in range(args.batches):
            controls_center = []
            controls_left = []
            controls_right = []
            settings = CarlaSettings()
            settings.set(SynchronousMode=False,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=100,
                         NumberOfPedestrians=100,
                         WeatherId=batch_no // 18)
            settings.randomize_seeds()

            camera_center = Camera('camera_center')
            camera_center.set_image_size(256, 128)
            camera_center.set_position(175, 0, 140)
            camera_center.set_rotation(-12, 0, 0)
            settings.add_sensor(camera_center)

            camera_left = Camera('camera_left')
            camera_left.set_image_size(256, 128)
            camera_left.set_position(175, 0, 140)
            camera_left.set_rotation(-12, 0, -30)
            if args.augmented:
                settings.add_sensor(camera_left)

            camera_right = Camera('camera_right')
            camera_right.set_image_size(256, 128)
            camera_right.set_position(175, 0, 140)
            camera_right.set_rotation(-12, 0, 30)
            if args.augmented:
                settings.add_sensor(camera_right)

            scene = client.load_settings(settings)
            number_of_player_starts = len(scene.player_start_spots)

            for episode in range(number_of_episodes):

                print('Starting new episode...:', episode)
                # np.random.randint's upper bound is exclusive, so all start
                # spots can be drawn from directly.
                client.start_episode(
                    np.random.randint(0, number_of_player_starts))

                # Iterate every frame in the episode.
                for frame in tqdm(range(frames_per_episode)):

                    # Read the data produced by the server this frame.
                    measurements, sensor_data = client.read_data()

                    # Save the image from every camera this frame.
                    for camera_name, image in sensor_data.items():
                        image.save_to_disk(
                            args.dir +
                            '/batch_{:0>3d}/{:s}/image_{:0>5d}.jpg'.format(
                                args.batch_start + batch_no, camera_name,
                                episode * frames_per_episode + frame))

                    control = measurements.player_measurements.autopilot_control
                    controls_center.append(
                        (control.steer, control.throttle,
                         measurements.player_measurements.forward_speed,
                         control.brake))

                    if args.augmented:
                        controls_left.append(
                            (control.steer + 35 / 70, control.throttle,
                             measurements.player_measurements.forward_speed,
                             control.brake))

                        controls_right.append((
                            control.steer - 35 / 70,
                            control.throttle,
                            measurements.player_measurements.forward_speed,
                            control.brake,
                        ))
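                        # The +/-35/70 (= 0.5) steer offsets presumably remap
                        # a fixed correction for the 30-degree side cameras
                        # into CARLA's [-1, 1] steering range.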

                    client.send_control(steer=control.steer +
                                        0.05 * np.random.randn(),
                                        throttle=control.throttle,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

            np.save(
                args.dir + '/batch_{:0>3d}/camera_center/controls.npy'.format(
                    args.batch_start + batch_no), controls_center)
            if args.augmented:
                np.save(
                    args.dir +
                    '/batch_{:0>3d}/camera_left/controls.npy'.format(
                        args.batch_start + batch_no), controls_left)
                np.save(
                    args.dir +
                    '/batch_{:0>3d}/camera_right/controls.npy'.format(
                        args.batch_start + batch_no), controls_right)
Example #17
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)
        if self._task == 'go-straight':
            task_select = 0
        elif self._task == 'turn-right':
            task_select = 1
        elif self._task == 'turn-left':
            task_select = 2
        elif self._task == 'go-straight-2':
            task_select = 3
        elif self._task == 'turn-right-2':
            task_select = 4
        elif self._task == 'turn-left-2':
            task_select = 5
        else:
            raise ValueError('Unknown task: {}'.format(self._task))

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name == 'Town02':
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name[:-1] == 'Town01_nemesis':
            poses_tasks = [self._poses_town01_nemesis()[task_select]]
            vehicles_tasks = [0, 0, 0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0, 0, 0]
        elif self._city_name[:-1] == 'Town02_nemesis':
            poses_tasks = [self._poses_town02_nemesis()[task_select]]
            vehicles_tasks = [0, 0, 0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0, 0, 0]

        experiments_vector = []
        """repeating experiment"""
        num_iters = self._iters
        weathers_iters = [self._weather] * num_iters

        # 0 - Default
        # 1 - ClearNoon
        # 2 - CloudyNoon
        # 3 - WetNoon
        # 4 - WetCloudyNoon
        # 5 - MidRainyNoon
        # 6 - HardRainNoon
        # 7 - SoftRainNoon
        # 8 - ClearSunset
        # 9 - CloudySunset
        # 10 - WetSunset
        # 11 - WetCloudySunset
        # 12 - MidRainSunset
        # 13 - HardRainSunset
        # 14 - SoftRainSunset

        for weather in weathers_iters:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                # Override the per-task agent counts with fixed values
                # (earlier revisions randomized weather and agent counts
                # here instead).
                vehicles = 400
                pedestrians = 400

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #18
def run(host, port, online_training):
    n_episodes = args.episodes
    frames_per_episode = args.frames
    image_dims = (256, 128)
    loss_history = []

    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Start a new episode.
        settings = CarlaSettings()
        settings.set(SynchronousMode=online_training,
                     SendNonPlayerAgentsInfo=True,
                     NumberOfVehicles=25,
                     NumberOfPedestrians=25,
                     WeatherId=np.random.choice(1))
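        # NOTE: np.random.choice(1) samples from [0], so WeatherId is always
        # 0 here; a range or list of weather ids was probably intended.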
        settings.randomize_seeds()

        # Add a single RGB camera.
        camera0 = Camera('CameraRGB')
        camera0.set_image_size(image_dims[0], image_dims[1])
        camera0.set_position(175, 0, 140)
        camera0.set_rotation(-12, 0, 0)
        settings.add_sensor(camera0)

        # Choose one player start at random.
        scene = client.load_settings(settings)
        number_of_player_starts = len(scene.player_start_spots)

        for episode in range(n_episodes):
            network_wrapper.init(args.network)
            print('Starting new episode:', episode)
            client.start_episode(episode % number_of_player_starts)

            # Iterate every frame in the episode.
            frame = 0
            count_diff = 0
            for frame in range(frames_per_episode):
                # Read the data produced by the server in this frame.
                measurements, sensor_data = client.read_data()

                # get the image and convert to numpy array
                image = sensor_data['CameraRGB']
                image = np.expand_dims(np.array(image.data), axis=0)
                # image, _ = network_wrapper.normalize_data(X=image)

                if online_training:

                    # train and inference simultaneously
                    predicted_controls = network_wrapper.online_training(
                        args.network, image, measurements.player_measurements.
                        autopilot_control.steer * 70)

                    # Because the autopilot follows a planned path, end the
                    # episode if the network and the autopilot repeatedly
                    # disagree.
                    if abs(predicted_controls.data[0] -
                           measurements.player_measurements.autopilot_control.
                           steer * 70) > 35:
                        print('Disagreement in network and autopilot mode.')
                        print(
                            'Network says: %.5f' % predicted_controls.data[0],
                            ', Autopilot says: %.5f' %
                            (measurements.player_measurements.
                             autopilot_control.steer * 70))
                        if count_diff < 10:
                            count_diff += 1
                        else:
                            break
                    else:
                        count_diff = 0

                else:
                    predicted_controls = network_wrapper.inference(
                        args.network, image)

                steer = predicted_controls.data[0]
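                # The network appears to predict steering in degrees: the
                # training target above is scaled by 70, and the prediction
                # is divided by 70 below before being sent to the simulator.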

                loss = abs(
                    steer -
                    measurements.player_measurements.autopilot_control.steer)
                loss_history.append(loss)
                print('Loss: %.5f' % loss)

                # send control to simulator
                client.send_control(steer=steer / 70,
                                    throttle=0.5,
                                    brake=0.0,
                                    hand_brake=False,
                                    reverse=False)
                np.save('loss_history_online_training.npy',
                        np.array(loss_history))
            if online_training and count_diff < 10:
                network_wrapper.save(args.network)
Example #19
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3000 frames.
    number_of_frames = 3000
    frame_step = 10  # Save one image every 10 frames
    output_folder = '_out'
    image_size = [1280, 720]
    camera_local_pos = [0.7, 0.0, 1.3]  # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 59
    WINDOW_WIDTH = 1280
    WINDOW_HEIGHT = 720
    MINI_WINDOW_WIDTH = 320
    MINI_WINDOW_HEIGHT = 180

    WINDOW_WIDTH_HALF = WINDOW_WIDTH / 2
    WINDOW_HEIGHT_HALF = WINDOW_HEIGHT / 2

    k = np.identity(3)
    k[0, 2] = WINDOW_WIDTH_HALF
    k[1, 2] = WINDOW_HEIGHT_HALF
    k[0, 0] = k[1, 1] = WINDOW_WIDTH / \
        (2.0 * math.tan(fov * math.pi / 360.0))
    intrinsic = k
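    # Pinhole model: principal point at the image center and focal length
    # f = W / (2 * tan(FOV / 2)), evaluated here for the 59-degree FOV.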
    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=False,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=1)
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)
        client.load_settings(settings)
        # Start at location index id '0'
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()
        print("camera_to_car_transform", camera_to_car_transform)
        carla_utils_obj = segmentation.carla_utils(intrinsic)

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:
                # Start of the per-frame transform computations.

                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                image_depth = to_rgb_array(sensor_data['CameraDepth'])
                world_transform = Transform(
                    measurements.player_measurements.transform)

                im_bgr = cv2.cvtColor(image_RGB, cv2.COLOR_RGB2BGR)

                pos_vector = ([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z
                ])

                # The Transform objects expose their 4x4 matrices directly,
                # so the extrinsic matrix can be composed without parsing
                # the transforms' string representations.
                newworld = np.array(world_transform.matrix)
                newcam = np.array(camera_to_car_transform.matrix)
                extrinsic = np.matmul(newworld, newcam)
                get_2d_point = carla_utils_obj.run_seg(im_bgr, extrinsic,
                                                       pos_vector)
                print(get_2d_point)
                depth = image_depth[int(get_2d_point[0]), int(get_2d_point[1])]
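                # CARLA 0.8 packs depth across the three 8-bit channels:
                # R + G*256 + B*256^2 is the raw 24-bit value, normalized by
                # (256^3 - 1) and scaled by the 1000 m far plane (the `far`
                # argument of this function appears to go unused).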
                scale = depth[0] + depth[1] * 256 + depth[2] * 256 * 256
                scale = scale / ((256 * 256 * 256) - 1)
                depth = scale * 1000
                pos2d = np.array([(WINDOW_WIDTH - get_2d_point[1]) * depth,
                                  (WINDOW_HEIGHT - get_2d_point[0]) * depth,
                                  depth])
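                # Back-project the pixel through the inverse intrinsics into
                # camera space, then through the extrinsic matrix (homogeneous
                # coordinates) to recover a 3D world position.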

                first = np.dot(np.linalg.inv(intrinsic), pos2d)
                first = np.append(first, 1)
                sec = np.matmul((extrinsic), first)
                print("present", pos_vector)
                print('goal-', sec)
            client.send_control(
                measurements.player_measurements.autopilot_control)
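The last block of the loop above decodes the depth at one pixel and back-projects it through the inverse intrinsics and the extrinsic matrix. For reference, the same steps packaged as a single helper; this is a minimal sketch under the example's conventions (RGB-encoded depth, 1000 m far plane, flipped pixel axes), and `pixel_to_world` is a hypothetical name rather than part of the CARLA client API.

import numpy as np

def pixel_to_world(row, col, depth_rgb, K, extrinsic, width, height):
    # Decode CARLA's 24-bit depth: R + G*256 + B*256^2, normalized to
    # [0, 1] and scaled by the 1000 m far plane.
    r, g, b = [int(v) for v in depth_rgb[row, col]]
    depth_m = (r + g * 256 + b * 256 * 256) / float(256 ** 3 - 1) * 1000.0
    # Flip the pixel axes as the example does, scale by depth, then undo
    # the projection and move the point into world coordinates.
    pos2d = np.array([(width - col) * depth_m,
                      (height - row) * depth_m,
                      depth_m])
    p_cam = np.dot(np.linalg.inv(K), pos2d)
    return np.dot(extrinsic, np.append(p_cam, 1.0))[:3]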
Example #20
0
def run_carla_client(args):
    # Here we will run 150 episodes with 500 frames each.
    number_of_episodes = 150
    frames_per_episode = 500

    # for start_i in range(150):
    #     if start_i%4==0:
    #         output_folder = 'Packages/CARLA_0.8.2/PythonClient/new_data-viz/test_' + str(start_i)
    #         if not os.path.exists(output_folder):
    #             os.mkdir(output_folder)
    #             print( "make "+str(output_folder))
    # ./CarlaUE4.sh -carla-server  -benchmark -fps=17 -windowed
    # carla-server "/usr/local/carla/Unreal/CarlaUE4/CarlaUE4.uproject" /usr/local/carla/Maps/Town03 -benchmark -fps=10 -windowed


    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:

        print('CarlaClient connected')
        global episode_nbr
        print (episode_nbr)
        for episode in range(0,number_of_episodes):
            if episode % 1 == 0:
                output_folder = 'Datasets/carla-sync/train/test_' + str(episode)
                if not os.path.exists(output_folder+"/cameras.p"):
                    # Start a new episode.
                    episode_nbr=episode
                    frame_step = 1  # Save one image every 'frame_step' frames
                    pointcloud_step = 50
                    image_size = [800, 600]
                    camera_local_pos = [0.3, 0.0, 1.3]  # [X, Y, Z]
                    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
                    fov = 70
                    # Create a CarlaSettings object. This object is a wrapper around
                    # the CarlaSettings.ini file. Here we set the configuration we
                    # want for the new episode.
                    settings = CarlaSettings()
                    settings.set(
                        SynchronousMode=True,
                        SendNonPlayerAgentsInfo=True,
                        NumberOfVehicles=50,
                        NumberOfPedestrians=200,
                        WeatherId=random.choice([1, 3, 7, 8, 14]),
                        QualityLevel=args.quality_level)
                    settings.randomize_seeds()

                    # Now we want to add a couple of cameras to the player vehicle.
                    # We will collect the images produced by these cameras every
                    # frame.


                    camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
                    camera1.set_image_size(*image_size)
                    camera1.set_position(*camera_local_pos)
                    camera1.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
                    camera2.set_image_size(*image_size)
                    camera2.set_position(*camera_local_pos)
                    camera2.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera2)

                    camera3 = Camera('CameraSeg', PostProcessing='SemanticSegmentation', FOV=fov)
                    camera3.set_image_size(*image_size)
                    camera3.set_position(*camera_local_pos)
                    camera3.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera3)



                    # Now we load these settings into the server. The server replies
                    # with a scene description containing the available start spots for
                    # the player. Here we can provide a CarlaSettings object or a
                    # CarlaSettings.ini file as string.
                    scene = client.load_settings(settings)

                    # Choose one player start at random.
                    number_of_player_starts = len(scene.player_start_spots)
                    player_start = episode  # instead of random.randint(0, max(0, number_of_player_starts - 1))

                    output_folder = 'Datasets/carla-sync/train/test_' + str(episode)
                    # Notify the server that we want to start the episode at the
                    # player_start index. This function blocks until the server is ready
                    # to start the episode.
                    print('Starting new episode...')
                    client.start_episode(player_start)


                    cameras_dict = {}
                    pedestrians_dict = {}
                    cars_dict = {}
                    # Compute the camera transform matrix
                    camera_to_car_transform = camera2.get_unreal_transform()
                    # (Intrinsic) (3, 3) K Matrix
                    K = np.identity(3)
                    K[0, 2] = image_size[0] / 2.0
                    K[1, 2] = image_size[1] / 2.0
                    K[0, 0] = K[1, 1] = image_size[0] / (2.0 * np.tan(fov * np.pi / 360.0))
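                    # Pinhole intrinsics: the principal point is the image
                    # center and the focal length in pixels follows from the
                    # horizontal field of view, f = w / (2 * tan(FOV / 2)).
                    # The same f is reused vertically, which assumes square
                    # pixels.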
                    # binary mode so the pickle also works under Python 3
                    with open(output_folder + '/camera_intrinsics.p', 'wb') as camfile:
                        pickle.dump(K, camfile)


                    # Iterate every frame in the episode.
                    for frame in range(0, frames_per_episode):

                        # Read the data produced by the server this frame.
                        measurements, sensor_data = client.read_data()

                        # Print some of the measurements.
                        #print_measurements(measurements)
                        if not frame % frame_step:
                            # Save the images to disk if requested.

                            for name, measurement in sensor_data.items():
                                filename = args.out_filename_format.format(episode, name, frame)
                                print (filename)
                                measurement.save_to_disk(filename)

                            # We can access the encoded data of a given image as numpy
                            # array using its "data" property. For instance, to get the
                            # depth value (normalized) at pixel X, Y
                            #
                            #     depth_array = sensor_data['CameraDepth'].data
                            #     value_at_pixel = depth_array[Y, X]
                            #

                            # Now we have to send the instructions to control the vehicle.
                            # If we are in synchronous mode the server will pause the
                            # simulation until we send this control.

                            # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                            image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                            labels = labels_to_array(sensor_data['CameraSeg'])[:, :, np.newaxis]

                            image_seg = np.tile(labels, (1, 1, 3))
                            depth_array = sensor_data['CameraDepth'].data * 1000  # normalized depth to meters (far plane = 1000 m)


                            # 2d to (camera) local 3d
                            # We use image_RGB to colorize each 3D point (optional).
                            # "max_depth" keeps only the points near the camera;
                            # 1.0 corresponds to the farthest points (sky).
                            if not frame % pointcloud_step:
                                point_cloud = depth_to_local_point_cloud(
                                    sensor_data['CameraDepth'],
                                    image_RGB,
                                    max_depth=args.far
                                )

                                point_cloud_seg = depth_to_local_point_cloud(
                                    sensor_data['CameraDepth'],
                                    segmentation=image_seg,
                                    max_depth=args.far
                                )

                            # (Camera) local 3d to world 3d.
                            # Get the transform from the player protobuf transformation.
                            world_transform = Transform(
                                measurements.player_measurements.transform
                            )

                            # Compute the final transformation matrix.
                            car_to_world_transform = world_transform * camera_to_car_transform

                            # Car to World transformation given the 3D points and the
                            # transformation matrix. Apply it only on the frames where
                            # the clouds were (re)computed; otherwise the previous,
                            # already-transformed clouds would be transformed again.
                            if not frame % pointcloud_step:
                                point_cloud.apply_transform(car_to_world_transform)
                                point_cloud_seg.apply_transform(car_to_world_transform)

                            Rt = car_to_world_transform.matrix
                            Rt_inv = car_to_world_transform.inverse().matrix
                            # R_inv=world_transform.inverse().matrix
                            cameras_dict[frame] = {}
                            cameras_dict[frame]['inverse_rotation'] = Rt_inv[:]
                            cameras_dict[frame]['rotation'] = Rt[:]
                            cameras_dict[frame]['translation'] = Rt_inv[0:3, 3]
                            cameras_dict[frame]['camera_to_car'] = camera_to_car_transform.matrix

                            # Get non-player info
                            vehicles = {}
                            pedestrians = {}
                            for agent in measurements.non_player_agents:
                                # check if the agent is a vehicle.
                                if agent.HasField('vehicle'):
                                    pos = agent.vehicle.transform.location
                                    pos_vector = np.array([[pos.x], [pos.y], [pos.z], [1.0]])

                                    transformed_3d_pos = np.dot(Rt_inv, pos_vector)
                                    pos2d = np.dot(K, transformed_3d_pos[:3])

                                    # Normalize the point
                                    norm_pos2d = np.array([
                                        pos2d[0] / pos2d[2],
                                        pos2d[1] / pos2d[2],
                                        pos2d[2]])

                                    # Now, pos2d contains the [x, y, d] values of the image in pixels (where d is the depth)
                                    # You can use the depth to know the points that are in front of the camera (positive depth).

                                    x_2d = image_size[0] - norm_pos2d[0]
                                    y_2d = image_size[1] - norm_pos2d[1]
                                    vehicles[agent.id] = {}
                                    vehicles[agent.id]['transform'] = [agent.vehicle.transform.location.x,
                                                                       agent.vehicle.transform.location.y,
                                                                       agent.vehicle.transform.location.z]
                                    vehicles[agent.id][
                                        'bounding_box.transform'] = agent.vehicle.bounding_box.transform.location.z

                                    vehicles[agent.id]['yaw'] = agent.vehicle.transform.rotation.yaw
                                    vehicles[agent.id]['bounding_box'] = [agent.vehicle.bounding_box.extent.x,
                                                                          agent.vehicle.bounding_box.extent.y,
                                                                          agent.vehicle.bounding_box.extent.z]
                                    vehicle_transform = Transform(agent.vehicle.bounding_box.transform)
                                    pos = agent.vehicle.transform.location

                                    bbox3d = agent.vehicle.bounding_box.extent

                                    # Compute the 3D bounding box:
                                    # f contains the 8 corners; the first 4 rows are the
                                    # bottom face, the last 4 the top face.
                                    f = np.array([[pos.x + bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z]])

                                    f_rotated = vehicle_transform.transform_points(f)
                                    f_2D_rotated = []
                                    vehicles[agent.id]['bounding_box_coord'] = f_rotated

                                    for i in range(f.shape[0]):
                                        point = np.array([[f_rotated[i, 0]], [f_rotated[i, 1]], [f_rotated[i, 2]], [1]])
                                        transformed_2d_pos = np.dot(Rt_inv, point)
                                        pos2d = np.dot(K, transformed_2d_pos[:3])
                                        norm_pos2d = np.array([
                                            pos2d[0] / pos2d[2],
                                            pos2d[1] / pos2d[2],
                                            pos2d[2]])
                                        # print([image_size[0] - (pos2d[0] / pos2d[2]), image_size[1] - (pos2d[1] / pos2d[2])])
                                        f_2D_rotated.append(
                                            np.array([image_size[0] - norm_pos2d[0], image_size[1] - norm_pos2d[1]]))
                                    vehicles[agent.id]['bounding_box_2D'] = f_2D_rotated


                                elif agent.HasField('pedestrian'):
                                    pedestrians[agent.id] = {}
                                    pedestrians[agent.id]['transform'] = [agent.pedestrian.transform.location.x,
                                                                          agent.pedestrian.transform.location.y,
                                                                          agent.pedestrian.transform.location.z]
                                    pedestrians[agent.id]['yaw'] = agent.pedestrian.transform.rotation.yaw
                                    pedestrians[agent.id]['bounding_box'] = [agent.pedestrian.bounding_box.extent.x,
                                                                             agent.pedestrian.bounding_box.extent.y,
                                                                             agent.pedestrian.bounding_box.extent.z]
                                    # get the needed transformations
                                    # remember to explicitly make it Transform() so you can use transform_points()
                                    pedestrian_transform = Transform(agent.pedestrian.transform)
                                    bbox_transform = Transform(agent.pedestrian.bounding_box.transform)

                                    # get the box extent
                                    ext = agent.pedestrian.bounding_box.extent
                                    # 8 bounding box vertices relative to (0,0,0)
                                    bbox = np.array([
                                        [  ext.x,   ext.y,   ext.z],
                                        [- ext.x,   ext.y,   ext.z],
                                        [  ext.x, - ext.y,   ext.z],
                                        [- ext.x, - ext.y,   ext.z],
                                        [  ext.x,   ext.y, - ext.z],
                                        [- ext.x,   ext.y, - ext.z],
                                        [  ext.x, - ext.y, - ext.z],
                                        [- ext.x, - ext.y, - ext.z]
                                    ])

                                    # transform the vertices respect to the bounding box transform
                                    bbox = bbox_transform.transform_points(bbox)

                                    # the bounding box transform is relative to the pedestrian
                                    # transform, so transform the points by it as well
                                    bbox = pedestrian_transform.transform_points(bbox)

                                    # pedestrian's transform is relative to the world, so now,
                                    # bbox contains the 3D bounding box vertices relative to the world
                                    pedestrians[agent.id]['bounding_box_coord'] =copy.deepcopy(bbox)

                                    # Additionally, you can print these vertices to check that it is working
                                    f_2D_rotated = []
                                    ys = []
                                    xs = []
                                    zs = []
                                    for vertex in bbox:
                                        pos_vector = np.array([
                                            [vertex[0,0]],  # [[X,
                                            [vertex[0,1]],  #   Y,
                                            [vertex[0,2]],  #   Z,
                                            [1.0]           #   1.0]]
                                        ])

                                        # transform the points to camera space
                                        transformed_3d_pos = np.dot(Rt_inv, pos_vector)
                                        zs.append(transformed_3d_pos[2])
                                        # project the points to 2D
                                        pos2d = np.dot(K, transformed_3d_pos[:3])

                                        # normalize the 2D points
                                        pos2d = np.array([
                                            pos2d[0] / pos2d[2],
                                            pos2d[1] / pos2d[2],
                                            pos2d[2]
                                        ])

                                        # keep the point only if it is in front of the camera
                                        if pos2d[2] > 0:
                                            x_2d = image_size[0] - pos2d[0]
                                            y_2d = image_size[1] - pos2d[1]
                                            ys.append(y_2d)
                                            xs.append(x_2d)
                                            f_2D_rotated.append((y_2d, x_2d))
                                    if len(xs) > 1:
                                        bbox = [[int(min(xs)), int(max(xs))], [int(min(ys)), int(max(ys))]]
                                        clipped_seg = labels[bbox[1][0]:bbox[1][1], bbox[0][0]:bbox[0][1]]
                                        recounted = Counter(clipped_seg.flatten())

                                        # Label 4 is 'Pedestrian' in CARLA's semantic segmentation;
                                        # require pedestrian pixels to cover at least 10% of the box.
                                        if 4 in recounted.keys() and recounted[4] > 0.1 * len(clipped_seg.flatten()):
                                            clipped_depth = depth_array[bbox[1][0]:bbox[1][1], bbox[0][0]:bbox[0][1]]
                                            # average the depth over the pedestrian pixels only
                                            people_indx = np.where(clipped_seg == 4)
                                            masked_depth = []
                                            for people in zip(people_indx[0], people_indx[1]):
                                                masked_depth.append(clipped_depth[people])
                                            avg_depth = np.mean(masked_depth)
                                            # sanity-check the depth against the projected z range
                                            if avg_depth < 700 and avg_depth >= min(zs) - 10 and avg_depth <= max(zs) + 10:
                                                #print("Correct depth")
                                                pedestrians[agent.id]['bounding_box_2D'] = f_2D_rotated
                                                pedestrians[agent.id]['bounding_box_2D_size']=recounted[4]
                                                pedestrians[agent.id]['bounding_box_2D_avg_depth']=avg_depth
                                                pedestrians[agent.id]['bounding_box_2D_depths']=zs
                                                #print ( pedestrians[agent.id].keys())
                                            #else:
                                                # print(recounted)
                                                # print ("Depth "+ str(min(zs))+" "+ str(max(zs)))


                            cars_dict[frame] = vehicles

                            pedestrians_dict[frame] = pedestrians
                            #print("End of Episode")
                            #print(len(pedestrians_dict[frame]))

                            # Save PLY to disk
                            # This generates the PLY string with the 3D points and the RGB colors
                            # for each row of the file.
                            if not frame % pointcloud_step:
                                point_cloud.save_to_disk(os.path.join(
                                    output_folder, '{:0>5}.ply'.format(frame))
                                )
                                point_cloud_seg.save_to_disk(os.path.join(
                                    output_folder, '{:0>5}_seg.ply'.format(frame))
                                )

                        if not args.autopilot:

                            client.send_control(
                                hand_brake=True)

                        else:

                            # Together with the measurements, the server has sent the
                            # control that the in-game autopilot would do this frame. We
                            # can enable autopilot by sending back this control to the
                            # server. We can modify it if wanted, here for instance we
                            # will add some noise to the steer.

                            control = measurements.player_measurements.autopilot_control
                            control.steer += random.uniform(-0.1, 0.1)
                            client.send_control(control)
                    print ("Start pickle save")
                    with open(output_folder + '/cameras.p', 'w') as camerafile:
                        pickle.dump(cameras_dict, camerafile)
                    with open(output_folder + '/people.p', 'w') as peoplefile:
                        pickle.dump(pedestrians_dict, peoplefile)
                    with open(output_folder + '/cars.p', 'w') as carfile:
                        pickle.dump(cars_dict, carfile)
                    print ("Episode done")
Example #21
0
def run_carla_client(args):
    # Here we will run args._episode episodes with args._frames frames each.
    number_of_episodes = args._episode
    frames_per_episode = args._frames

    # set speed and yaw rate variable to default
    speedyawrate = np.nan

    #call init in eval file to load model
    checkpoint_dir_loc = args.chckpt_loc
    prefix = args.model_name
    tf_carla_eval.init(checkpoint_dir_loc, prefix)

    # create the carla client
    with make_carla_client(args.host, args.port, timeout=100000) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):

            if args.settings_filepath is None:

                # if same starting position arg set use same weather
                if args._sameStart > -1:
                    choice = 1
                    nrVehicles = 0
                    nrPedestrians = 0
                else:
                    choice = random.choice([1, 3, 7, 8, 14])
                    nrVehicles = 150
                    nrPedestrians = 100

                settings = CarlaSettings()
                settings.set(SynchronousMode=args.synchronous_mode,
                             SendNonPlayerAgentsInfo=True,
                             NumberOfVehicles=nrVehicles,
                             NumberOfPedestrians=nrPedestrians,
                             WeatherId=choice,
                             QualityLevel=args.quality_level)
                settings.randomize_seeds()

                camera0 = Camera('CameraRGB')
                camera0.set_image_size(1920, 640)
                camera0.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera0)

                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(1920, 640)
                camera1.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera1)

                camera2 = Camera('CameraSegmentation',
                                 PostProcessing='SemanticSegmentation')
                camera2.set_image_size(1920, 640)
                camera2.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera2)

                if args._save:
                    camera3 = Camera('CameraRGB_top',
                                     PostProcessing='SceneFinal')
                    camera3.set_image_size(800, 800)
                    camera3.set_position(-6.0, 0, 4.0)
                    camera3.set_rotation(yaw=0, roll=0, pitch=-10)
                    settings.add_sensor(camera3)

            else:
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            if args._sameStart > -1:
                player_start = args._sameStart
            else:
                player_start = random.randint(
                    0, max(0, number_of_player_starts - 1))

            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            if args._save:
                # Initialize the stuck counter: if the car does not move for too many frames we restart the episode.
                nrStuck = 0
                # maximum number of times we can get stuck before restarting
                maxStuck = 5

                # last location, used to check whether the car is changing position
                LastLoc = namedtuple('LastLoc', 'x y z')
                LastLoc.__new__.__defaults__ = (0.0, 0.0, 0.0)
                last_loc = LastLoc(x=-1, y=-1)

                # delete frames of previous run to not interfere with video creation
                for rmf in glob.glob(args.file_loc + 'tmpFrameFolder/frame*'):
                    os.remove(rmf)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                measurements, sensor_data = client.read_data()

                # if we are saving video of episode move to next episode if we get stuck
                if args._save:
                    player_measurements = measurements.player_measurements
                    col_other = player_measurements.collision_other
                    col_cars = player_measurements.collision_vehicles
                    curr_loc = player_measurements.transform.location
                    if player_measurements.forward_speed <= 0.05 and sqrdist(
                            last_loc, curr_loc) < 2 and frame > 30:
                        if nrStuck == maxStuck:
                            print(
                                "\nWe are stuck! Writing to video and restarting."
                            )
                            #args._sameStart += 1
                            break
                        else:
                            print("Stuck: " + str(nrStuck) + "/" +
                                  str(maxStuck))
                            nrStuck += 1
                    last_loc = curr_loc

                # Skip first couple of images due to setup time.
                if frame > 19:
                    player_measurements = measurements.player_measurements

                    for name, curr_measurements in sensor_data.items():
                        if name == 'CameraDepth':
                            depth = curr_measurements.return_depth_map()
                        if name == 'CameraSegmentation':
                            segmentation = curr_measurements.return_segmentation_map(
                            )
                        if name == 'CameraRGB':
                            rgb = curr_measurements.return_rgb()

                    yaw, yaw_rate, speed, prev_time = process_odometry(
                        measurements, yaw_shift, yaw_old, prev_time)
                    yaw_old = yaw

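                    # Draw nearby agents on a 6x-supersampled grayscale canvas
                    # (mid-gray background); it is downsampled to 256x256 and
                    # rotated below before being handed to the model.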
                    im = Image.new('L', (256 * 6, 256 * 6), (127))
                    shift = 256 * 6 / 2
                    draw = ImageDraw.Draw(im)

                    gmTime = time.time()
                    for agent in measurements.non_player_agents:
                        if agent.HasField('vehicle') or agent.HasField(
                                'pedestrian'):
                            participant = agent.vehicle if agent.HasField(
                                'vehicle') else agent.pedestrian
                            if not participantInRange(
                                    participant.transform.location,
                                    player_measurements.transform.location):
                                continue
                            angle = cart2pol(
                                participant.transform.orientation.x,
                                participant.transform.orientation.y)
                            polygon = agent2world(participant, angle)
                            polygon = world2player(
                                polygon, math.radians(-yaw),
                                player_measurements.transform)
                            polygon = player2image(polygon,
                                                   shift,
                                                   multiplier=25)
                            polygon = [tuple(row) for row in polygon.T]

                            draw.polygon(polygon, 0, 0)

                    im = im.resize((256, 256), Image.ANTIALIAS)
                    im = im.rotate(imrotate)
                    gmTime = time.time() - gmTime
                    if not args.all_data:
                        print("only all data supported for now")
                        exit()
                    else:
                        if args._opd:
                            ppTime, evTime, imTime, tkTime, speedyawrate, direction = tf_carla_eval.eval_only_drive(
                                im, rgb, depth, segmentation, -yaw_rate, speed)
                        else:
                            ppTime, evTime, imTime, tkTime, speedyawrate, direction, imReady, im = tf_carla_eval.eval(
                                im, rgb, depth, segmentation, -yaw_rate, speed)
                            if args._save and imReady:
                                # append frame to array for video output
                                for name, curr_measurements in sensor_data.items(
                                ):
                                    if name == 'CameraRGB_top':
                                        filename = args.file_loc + "tmpFrameFolder/frame" + str(
                                            frame).zfill(4) + ".jpg"
                                        Image.fromarray(
                                            np.concatenate([
                                                im,
                                                curr_measurements.return_rgb()
                                            ], 1)).save(filename)
                                        break

                        #printTimePerEval(gmTime, ppTime, evTime, imTime, tkTime)

                else:
                    # get first values
                    yaw_shift = measurements.player_measurements.transform.rotation.yaw
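                    # wrap the raw yaw into the [-180, 180) range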
                    yaw_old = ((yaw_shift - 180) % 360) - 180
                    imrotate = round(yaw_old) + 90
                    shift_x = measurements.player_measurements.transform.location.x
                    shift_y = measurements.player_measurements.transform.location.y
                    prev_time = np.int64(measurements.game_timestamp)

                if not args.autopilot:
                    control = getKeyboardControl()
                else:
                    # values are nan while script is gathering frames before first prediction
                    if np.any(np.isnan(speedyawrate)):
                        control = measurements.player_measurements.autopilot_control
                        control.steer += random.uniform(-0.01, 0.01)
                    else:
                        control = VehicleControl()
                        # speedyawrate contains T entries, first entry is first prediction
                        dirAvgEncoding = np.mean(
                            np.squeeze(np.array(direction)), 0)
                        speedyawrate = np.asarray(speedyawrate)
                        steerAvgEncoding = np.mean(np.squeeze(speedyawrate), 0)
                        control.throttle = mapThrottle(player_measurements,
                                                       speedyawrate)
                        control.brake = int(control.throttle == -1)
                        control.steer = mapSteer(steerAvgEncoding[1])
                        #printSteering(measurements.player_measurements.forward_speed,
                        #              measurements.player_measurements.autopilot_control.throttle,
                        #              measurements.player_measurements.autopilot_control.steer,
                        #              speedyawrate, control.throttle, control.steer,
                        #              dirAvgEncoding, steerAvgEncoding)

                client.send_control(control)
            if args._save:
                print("\nConverting frames to video...")
                os.system(
                    "ffmpeg -r 10 -f image2 -start_number 20 -i {}frame%04d.jpg -vcodec libx264 -crf 15 -pix_fmt yuv420p {}"
                    .format(
                        args.file_loc + "tmpFrameFolder/", args.file_loc +
                        "video" + str(time.time() * 1000) + ".mp4"))
                print("Finished conversion.")
Example #22
0
def build_eccv_navigation_dynamic():
    def _poses_town01():
        """
        Each matrix is a new task. We have all four tasks.

        """

        def _poses_navigation():
            return [[36, 40], [39, 35]]
            #return [[105, 29], [27, 130], [102, 87], [132, 27], [24, 44],
            #        [96, 26], [34, 67], [28, 1], [140, 134], [105, 9],
            #        [148, 129], [65, 18], [21, 16], [147, 97], [42, 51],
            #        [30, 41], [18, 107], [69, 45], [102, 95], [18, 145],
            #        [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]

        return [_poses_navigation()]

    def _poses_town02():


        def _poses_navigation():
            return [[38, 34], [4, 2]]
            #return [[19, 66], [79, 14], [19, 57], [23, 1],
            #        [53, 76], [42, 13], [31, 71], [33, 5],
            #        [54, 30], [10, 61], [66, 3], [27, 12],
            #        [79, 19], [2, 29], [16, 14], [5, 57],
            #        [70, 73], [46, 67], [57, 50], [61, 49], [21, 12],
            #        [51, 81], [77, 68], [56, 65], [43, 54]]

        return [_poses_navigation()]

    # We check the town, based on that we define the town related parameters
    # The size of the vector is related to the number of tasks, inside each
    # task there is also multiple poses ( start end, positions )

    exp_set_dict = {
        'Name': 'eccv_navigation_dynamic',
        'Town01': {'poses': _poses_town01(),
                   'vehicles': [20],
                   'pedestrians': [50],
                   'weathers_train': [1],
                   'weathers_validation': []

                   },
        'Town02': {'poses': _poses_town02(),
                   'vehicles': [15],
                   'pedestrians': [50],
                   'weathers_train': [],
                   'weathers_validation': [14]

                   }
    }

    # We set the camera
    # This single RGB camera is used on every experiment
    camera = Camera('rgb')
    camera.set(FOV=100)
    camera.set_image_size(800, 600)
    camera.set_position(2.0, 0.0, 1.4)
    camera.set_rotation(-15.0, 0, 0)
    sensor_set = [camera]

    return _build_experiments(exp_set_dict, sensor_set), exp_set_dict
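The `exp_set_dict` returned above keeps the per-town poses, traffic counts and weather splits together; a short sketch of how a caller might enumerate the conditions it encodes (plain dict access, no CARLA calls, and `list_conditions` is a hypothetical name):

def list_conditions(exp_set_dict, town):
    cfg = exp_set_dict[town]
    for task_id, poses in enumerate(cfg['poses']):
        for weather in cfg['weathers_train'] + cfg['weathers_validation']:
            print(town, 'task', task_id, 'weather', weather,
                  'vehicles', cfg['vehicles'][task_id],
                  'pedestrians', cfg['pedestrians'][task_id],
                  'poses', poses)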
Example #23
0
    def build_experiments(self):
        """
            Creates the whole set of experiment objects.
            The experiments created depend on the selected Town.

        """

        # We check the town, based on that we define the town related parameters
        # The size of the vector is related to the number of tasks, inside each
        # task there is also multiple poses ( start end, positions )
        if self._city_name == 'Town01':
            #poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            poses_tasks = [[[138, 134]]]
            vehicles_tasks = [20]
            pedestrians_tasks = [10]
        else:
            right_curves = [[[1, 56], [65, 69], [78, 51], [44, 61], [40, 17],
                             [71, 16], [74, 38], [46, 12], [19, 18], [26, 74],
                             [37, 76], [11, 44], [20, 6], [10, 22], [28, 2],
                             [5, 15], [14, 33], [34, 8]]]
            left_curves = [[[57, 82], [72, 43], [52, 79], [70, 66], [43, 14],
                            [11, 47], [79, 32], [37, 75], [75, 16], [26, 73],
                            [39, 5], [2, 37], [34, 13], [6, 35], [10, 19],
                            [23, 6], [5, 30], [16, 2]]]
            special_test = [[[19, 66], [79, 14], [42, 13], [31, 71], [54, 30],
                             [10, 61], [66, 3], [27, 12], [2, 29], [16, 14],
                             [70, 73], [46, 67], [51, 81], [56, 65], [43, 54]]]
            corl_task2 = [[[19, 66], [79, 14], [19, 57], [23, 1], [53, 76],
                           [42, 13], [31, 71], [33, 5], [54, 30], [10, 61],
                           [66, 3], [27, 12], [79, 19], [2, 29], [16, 14],
                           [5, 57], [70, 73], [46, 67], [57, 50], [61, 49],
                           [21, 12], [51, 81], [77, 68], [56, 65], [43, 54]]]

            #poses_tasks = corl_task2
            poses_tasks = [[[10, 19]]]
            vehicles_tasks = [0]  #*len(poses_tasks[0])
            pedestrians_tasks = [0]  #*len(poses_tasks[0])

        # We set the camera
        # This single RGB camera is used on every experiment
        '''camera = Camera('CameraRGB')
        camera.set(FOV=90)
        camera.set_image_size(800, 600)
        camera.set_position(1.44, 0.0, 1.2)
        camera.set_rotation(0, 0, 0)'''

        camera0 = Camera('CameraRGB0')
        camera0.set(FOV=90)
        camera0.set_image_size(400, 300)
        camera0.set_position(1.7, 0.0, 1.3)
        camera0.set_rotation(0, 0, 0)

        camera1 = Camera('CameraRGB1')
        camera1.set(FOV=90)
        camera1.set_image_size(400, 300)
        camera1.set_position(1.7, 0.0, 1.3)
        camera1.set_rotation(0, -45, 0)

        camera2 = Camera('CameraRGB2')
        camera2.set(FOV=90)
        camera2.set_image_size(400, 300)
        camera2.set_position(1.7, 0.0, 1.3)
        camera2.set_rotation(0, +45, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiments
                conditions.add_sensor(camera0)
                conditions.add_sensor(camera1)
                conditions.add_sensor(camera2)
                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)
            '''path0=os.listdir('CameraRGB0')
            path1=os.listdir('CameraRGB1')
            path2=os.listdir('CameraRGB2')
            os.mkdir('CameraRGB')

            width=400
            height=300

            total_width = width*3
            max_height = height

            i = 0
            images=[]
            images.append(Image.open('CameraRGB1/'+path1[i]))
            images.append(Image.open('CameraRGB0/'+path0[i]))
            images.append(Image.open('CameraRGB2/'+path2[i]))
            new_im = Image.new('RGB', (total_width, max_height))
            x_offset = 0
            for im in images:
                new_im.paste(im, (x_offset,0))
                x_offset += im.size[0]
                new_im.save('CameraRGB/'+path0[i])
            i = i+1'''

        return experiments_vector
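The triple-quoted block above sketches pasting the three camera views side by side, but it only advances its index after the loop, so it would ever stitch a single frame. A corrected, self-contained variant under the same assumptions (matching file names across the three folders, PIL available):

import os
from PIL import Image

def stitch_views(out_dir='CameraRGB', width=400, height=300):
    os.makedirs(out_dir, exist_ok=True)
    for name in sorted(os.listdir('CameraRGB0')):
        # left (-45), center (0) and right (+45) views, pasted in order
        canvas = Image.new('RGB', (width * 3, height))
        x_offset = 0
        for folder in ('CameraRGB1', 'CameraRGB0', 'CameraRGB2'):
            im = Image.open(os.path.join(folder, name))
            canvas.paste(im, (x_offset, 0))
            x_offset += im.size[0]
        canvas.save(os.path.join(out_dir, name))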
Example #24
0
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        self.pre_intersection = np.array([0.0, 0.0])

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_sensor"] == 'use_semantic':
            camera0 = Camera("CameraSemantic",
                             PostProcessing="SemanticSegmentation")
            camera0.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera0.set_position(30, 0, 130)
            camera0.set(FOV=120)
            camera0.set_position(2.0, 0.0, 1.4)
            camera0.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera0)

        if self.config["use_sensor"] in ['use_depth', 'use_logdepth']:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set(FOV=120)
            camera1.set_position(2.0, 0.0, 1.4)
            camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        if self.config["use_sensor"] == 'use_rgb':
            camera2 = Camera("CameraRGB")
            camera2.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera2.set_position(30, 0, 130)
            # camera2.set_position(0.3, 0.0, 1.3)
            camera2.set(FOV=120)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera2)

        if self.config["use_sensor"] == 'use_2rgb':
            camera_l = Camera("CameraRGB_L")
            camera_l.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_l.set(FOV=120)
            camera_l.set_position(2.0, -0.1, 1.4)
            camera_l.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_l)

            camera_r = Camera("CameraRGB_R")
            camera_r.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_r.set(FOV=120)
            camera_r.set_position(2.0, 0.1, 1.4)
            camera_r.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_r)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        self.positions = positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]

        self.pre_pos = self.start_pos.location
        self.cnt1 = 0
        self.displacement = 1000.0

        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 1, self.start_pos.location.y // 1
        ]
        self.end_coord = [
            self.end_pos.location.x // 1, self.end_pos.location.y // 1
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # wait out the vehicle drop at the start of a new episode.
        cnt = 1
        z1 = 0
        zero_control = VehicleControl()
        while (cnt < 3):
            self.client.send_control(
                zero_control
            )  # VehicleControl().steer = 0, VehicleControl().throttle = 0, VehicleControl().reverse = False
            z0 = z1
            z1 = self.client.read_data(
            )[0].player_measurements.transform.location.z
            print(z1)
            if z0 - z1 == 0:
                cnt += 1
        print('Starting new episode done.\n')

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements

        # self.current_heading = self.pre_heading = np.array([py_measurements["x_orient"], py_measurements["y_orient"]])
        # self.angular_speed = 0.0

        self.pre_heading_degree = self.current_heading_degree = py_measurements[
            "current_heading_degree"]
        self.angular_speed_degree = np.array(0.0)

        return self.encode_obs(self.preprocess_image(image),
                               py_measurements), py_measurements
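The while loop near the end of `_reset` waits for the spawned vehicle to stop falling by watching its z coordinate. Isolated as a helper it reads as follows; a minimal sketch against the same 0.8-era client calls used above (`send_control`, `read_data`, `VehicleControl`), with the frame count as an assumption:

from carla.client import VehicleControl  # as used elsewhere in these examples

def wait_until_settled(client, stable_frames=3):
    # Send zero control until z stops changing for a few consecutive frames.
    zero_control = VehicleControl()
    prev_z = None
    stable = 0
    while stable < stable_frames:
        client.send_control(zero_control)
        measurements, _ = client.read_data()
        z = measurements.player_measurements.transform.location.z
        if prev_z is not None and z == prev_z:
            stable += 1
        else:
            stable = 0
        prev_z = z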
Example #25
0
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':

            if self._subset == 'keep_lane':
                poses_tasks = [self._poses_town01()[0]]
                vehicles_tasks = [0]
                pedestrians_tasks = [0]

            elif self._subset == 'one_turn':
                poses_tasks = [self._poses_town01()[1]]
                vehicles_tasks = [0]
                pedestrians_tasks = [0]

            elif self._subset == 'keep_lane_one_turn':
                poses_tasks = self._poses_town01()[:2]
                vehicles_tasks = [0, 0]
                pedestrians_tasks = [0, 0]

            elif self._subset == 'no_dynamic_objects':
                poses_tasks = self._poses_town01()[:3]
                vehicles_tasks = [0, 0, 0]
                pedestrians_tasks = [0, 0, 0]

            elif self._subset is None:
                poses_tasks = self._poses_town01()
                vehicles_tasks = [0, 0, 0, 20]
                pedestrians_tasks = [0, 0, 0, 50]

            else:
                raise ValueError(
                    "experiments-subset must be keep_lane, one_turn, "
                    "keep_lane_one_turn, no_dynamic_objects or None"
                )

        else:
            raise ValueError("city must be Town01 for training")

        experiments_vector = []

        for iteration in range(len(poses_tasks)):

            for weather in self.weathers:

                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiments

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)

                experiments_vector.append(experiment)

        return experiments_vector
Example #26
0
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3000 frames.
    number_of_frames = 3000
    frame_step = 100  # Save one image every 100 frames
    output_folder = '_out'
    image_size = [800, 600]
    camera_local_pos = [0.3, 0.0, 1.3] # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 70

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=20,
            NumberOfPedestrians=40,
            WeatherId=random.choice([1, 3, 7, 8, 14]))
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)

        client.load_settings(settings)

        # Start at location index id '0'
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:
                # Start measuring the transformation time.
                timer = StopWatch()

                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                # 2D to (camera-)local 3D.
                # We use image_RGB to colorize each 3D point; this is optional.
                # "max_depth" keeps only the points close to the camera
                # (1.0 corresponds to the farthest points, i.e. the sky).
                point_cloud = depth_to_local_point_cloud(
                    sensor_data['CameraDepth'],
                    image_RGB,
                    max_depth=far
                )

                # (Camera) local 3d to world 3d.
                # Get the transform from the player protobuf transformation.
                world_transform = Transform(
                    measurements.player_measurements.transform
                )

                # Compute the final transformation matrix.
                car_to_world_transform = world_transform * camera_to_car_transform

                # Transform the 3D points from car space to world space using
                # the matrix above.
                point_cloud.apply_transform(car_to_world_transform)

                # End of transformation time measurement.
                timer.stop()

                # Save PLY to disk
                # This generates the PLY string with the 3D points and the RGB colors
                # for each row of the file.
                point_cloud.save_to_disk(os.path.join(
                    output_folder, '{:0>5}.ply'.format(frame))
                )

                print_message(timer.milliseconds(), len(point_cloud), frame)

            client.send_control(
                measurements.player_measurements.autopilot_control
            )
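The last two transforms above compose into a single camera-to-world matrix; a hypothetical standalone check of the same math (Transform exposes the 4x4 matrix via .matrix, as the next example also uses):

    import numpy as np
    p_camera = np.array([1.0, 0.0, 0.0, 1.0])           # homogeneous point in camera space
    car_to_world = world_transform * camera_to_car_transform
    p_world = np.dot(car_to_world.matrix, p_camera)     # what apply_transform does per point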
Example #27
def run_carla_client(host, port, far):
    # Here we will run one episode per start position, 30 frames each.
    number_of_frames = 30
    frame_step = 10  # Save data every 10 frames

    image_size = [800, 600]
    camera_local_pos = [0.3, 0.0, 1.3]  # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 70
    autopilot = False
    control = VehicleControl()
    for start_i in range(150):
        output_folder = ('/home2/mariap/Packages/CARLA_0.8.2/PythonClient/'
                         '_out/pos_' + str(start_i))
        if not os.path.exists(output_folder):
            os.mkdir(output_folder)
            print("make " + str(output_folder))

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')
        for start_i in range(150):
            output_folder = ('/home2/mariap/Packages/CARLA_0.8.2/PythonClient/'
                             '_out/pos_' + str(start_i))
            print(output_folder)

            # Here we load the settings.
            settings = CarlaSettings()
            settings.set(SynchronousMode=True,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=100,
                         NumberOfPedestrians=500,
                         WeatherId=random.choice([1, 3, 7, 8, 14]))
            settings.randomize_seeds()

            camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
            camera1.set_image_size(*image_size)
            camera1.set_position(*camera_local_pos)
            camera1.set_rotation(*camera_local_rotation)
            settings.add_sensor(camera1)

            camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
            camera2.set_image_size(*image_size)
            camera2.set_position(*camera_local_pos)
            camera2.set_rotation(*camera_local_rotation)
            settings.add_sensor(camera2)

            camera3 = Camera('CameraSeg',
                             PostProcessing='SemanticSegmentation')
            camera3.set_image_size(*image_size)
            camera3.set_position(*camera_local_pos)
            camera3.set_rotation(*camera_local_rotation)
            settings.add_sensor(camera3)

            client.load_settings(settings)

            # Start at location index 'start_i'
            client.start_episode(start_i)

            cameras_dict = {}
            pedestrians_dict = {}
            cars_dict = {}
            # Compute the camera transform matrix
            camera_to_car_transform = camera2.get_unreal_transform()
            # (Intrinsic) 3x3 K matrix
            K = np.identity(3)
            K[0, 2] = image_size[0] / 2.0
            K[1, 2] = image_size[1] / 2.0
            K[0, 0] = K[1, 1] = image_size[0] / (2.0 * np.tan(fov * np.pi / 360.0))
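            # (Added note) Standard pinhole intrinsics: principal point at the
            # image center, focal length in pixels = width / (2 * tan(FOV/2)).
            # With FOV=70 and width=800 this gives roughly 571.3 px.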
            # Pickle needs a binary file handle in Python 3.
            with open(output_folder + '/camera_intrinsics.p', 'wb') as camfile:
                pickle.dump(K, camfile)

            # Iterate every frame in the episode except for the first one.
            for frame in range(1, number_of_frames):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Save one image every 'frame_step' frames
                if not frame % frame_step:
                    for name, measurement in sensor_data.items():
                        filename = '{:s}/{:0>6d}'.format(name, frame)
                        measurement.save_to_disk(
                            os.path.join(output_folder, filename))
                    # Start measuring the transformation time.
                    timer = StopWatch()

                    # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                    image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                    # Expand the 2D label map to (H, W, 3) so it can be used
                    # as per-point colors.
                    image_seg = np.tile(
                        labels_to_array(sensor_data['CameraSeg'])[:, :, np.newaxis],
                        (1, 1, 3))

                    # 2D to (camera-)local 3D.
                    # We use image_RGB to colorize each 3D point; this is optional.
                    # "max_depth" keeps only the points close to the camera
                    # (1.0 corresponds to the farthest points, i.e. the sky).
                    point_cloud = depth_to_local_point_cloud(
                        sensor_data['CameraDepth'], image_RGB, max_depth=far)

                    point_cloud_seg = depth_to_local_point_cloud(
                        sensor_data['CameraDepth'], image_seg, max_depth=far)

                    # (Camera) local 3d to world 3d.
                    # Get the transform from the player protobuf transformation.
                    world_transform = Transform(
                        measurements.player_measurements.transform)

                    # Compute the final transformation matrix.
                    car_to_world_transform = world_transform * camera_to_car_transform

                    # Transform the 3D points from car space to world space
                    # using the matrix above.
                    point_cloud.apply_transform(car_to_world_transform)
                    point_cloud_seg.apply_transform(car_to_world_transform)

                    Rt = car_to_world_transform.matrix
                    # Rt_inv is the world-to-camera matrix.
                    Rt_inv = car_to_world_transform.inverse().matrix
                    #R_inv=world_transform.inverse().matrix
                    cameras_dict[frame] = {}
                    cameras_dict[frame]['inverse_rotation'] = Rt_inv[:]
                    cameras_dict[frame]['rotation'] = Rt[:]
                    cameras_dict[frame]['translation'] = Rt_inv[0:3, 3]
                    cameras_dict[frame]['camera_to_car'] = camera_to_car_transform.matrix

                    # Get non-player info
                    vehicles = {}
                    pedestrians = {}
                    for agent in measurements.non_player_agents:
                        # check if the agent is a vehicle.
                        if agent.HasField('vehicle'):
                            pos = agent.vehicle.transform.location
                            pos_vector = np.array([[pos.x], [pos.y], [pos.z],
                                                   [1.0]])

                            transformed_3d_pos = np.dot(Rt_inv, pos_vector)
                            pos2d = np.dot(K, transformed_3d_pos[:3])

                            # Normalize the point
                            norm_pos2d = np.array([
                                pos2d[0] / pos2d[2], pos2d[1] / pos2d[2],
                                pos2d[2]
                            ])

                            # Now, pos2d contains the [x, y, d] values of the image in pixels (where d is the depth)
                            # You can use the depth to know the points that are in front of the camera (positive depth).

                            x_2d = image_size[0] - norm_pos2d[0]
                            y_2d = image_size[1] - norm_pos2d[1]
                            vehicles[agent.id] = {}
                            vehicles[agent.id]['transform'] = [
                                agent.vehicle.transform.location.x,
                                agent.vehicle.transform.location.y,
                                agent.vehicle.transform.location.z
                            ]
                            vehicles[agent.id][
                                'bounding_box.transform'] = agent.vehicle.bounding_box.transform.location.z

                            vehicles[agent.id][
                                'yaw'] = agent.vehicle.transform.rotation.yaw
                            vehicles[agent.id]['bounding_box'] = [
                                agent.vehicle.bounding_box.extent.x,
                                agent.vehicle.bounding_box.extent.y,
                                agent.vehicle.bounding_box.extent.z
                            ]
                            vehicle_transform = Transform(
                                agent.vehicle.bounding_box.transform)
                            pos = agent.vehicle.transform.location

                            bbox3d = agent.vehicle.bounding_box.extent

                            # Compute the 8 corners of the 3D bounding box:
                            # the first four rows are the bottom face, the
                            # last four the top face (same x/y sign pattern).
                            z_off = agent.vehicle.bounding_box.transform.location.z
                            f = np.array([[pos.x + sx * bbox3d.x,
                                           pos.y + sy * bbox3d.y,
                                           pos.z + sz * bbox3d.z + z_off]
                                          for sz in (-1, 1)
                                          for sx, sy in ((1, -1), (1, 1),
                                                         (-1, 1), (-1, -1))])

                            f_rotated = vehicle_transform.transform_points(f)
                            f_2D_rotated = []
                            vehicles[
                                agent.id]['bounding_box_coord'] = f_rotated

                            for i in range(f.shape[0]):
                                point = np.array([[f_rotated[i, 0]],
                                                  [f_rotated[i, 1]],
                                                  [f_rotated[i, 2]], [1]])
                                transformed_2d_pos = np.dot(
                                    Rt_inv,
                                    point)  # 3d Position in camera space
                                pos2d = np.dot(
                                    K, transformed_2d_pos[:3]
                                )  # Conversion to camera frustum space
                                norm_pos2d = np.array([
                                    pos2d[0] / pos2d[2], pos2d[1] / pos2d[2],
                                    pos2d[2]
                                ])
                                #print([image_size[0] - (pos2d[0] / pos2d[2]), image_size[1] - (pos2d[1] / pos2d[2])])
                                f_2D_rotated.append(
                                    np.array([
                                        image_size[0] - norm_pos2d[0],
                                        image_size[1] - norm_pos2d[1]
                                    ]))
                            vehicles[
                                agent.id]['bounding_box_2D'] = f_2D_rotated

                        elif agent.HasField('pedestrian'):

                            pedestrians[agent.id] = {}
                            pedestrians[agent.id]['transform'] = [
                                agent.pedestrian.transform.location.x,
                                agent.pedestrian.transform.location.y,
                                agent.pedestrian.transform.location.z
                            ]
                            pedestrians[agent.id][
                                'yaw'] = agent.pedestrian.transform.rotation.yaw
                            pedestrians[agent.id]['bounding_box'] = [
                                agent.pedestrian.bounding_box.extent.x,
                                agent.pedestrian.bounding_box.extent.y,
                                agent.pedestrian.bounding_box.extent.z
                            ]
                            vehicle_transform = Transform(
                                agent.pedestrian.bounding_box.transform)
                            pos = agent.pedestrian.transform.location

                            bbox3d = agent.pedestrian.bounding_box.extent

                            # Compute the 8 corners of the 3D bounding box:
                            # bottom face at pos.z, top face at pos.z + extent.
                            f = np.array([[pos.x + sx * bbox3d.x,
                                           pos.y + sy * bbox3d.y,
                                           pos.z + dz]
                                          for dz in (0, bbox3d.z)
                                          for sx, sy in ((1, -1), (1, 1),
                                                         (-1, 1), (-1, -1))])

                            f_rotated = vehicle_transform.transform_points(f)
                            pedestrians[
                                agent.id]['bounding_box_coord'] = f_rotated
                            f_2D_rotated = []

                            for i in range(f.shape[0]):
                                point = np.array([[f_rotated[i, 0]],
                                                  [f_rotated[i, 1]],
                                                  [f_rotated[i, 2]], [1]])
                                transformed_2d_pos = np.dot(
                                    Rt_inv, point)  # See above for cars
                                pos2d = np.dot(K, transformed_2d_pos[:3])
                                norm_pos2d = np.array([
                                    pos2d[0] / pos2d[2], pos2d[1] / pos2d[2],
                                    pos2d[2]
                                ])
                                f_2D_rotated.append([
                                    image_size[0] - norm_pos2d[0],
                                    image_size[1] - norm_pos2d[1]
                                ])
                            pedestrians[
                                agent.id]['bounding_box_2D'] = f_2D_rotated

                    cars_dict[frame] = vehicles
                    pedestrians_dict[frame] = pedestrians

                    # End of transformation time measurement.
                    timer.stop()

                    # Save PLY to disk
                    # This generates the PLY string with the 3D points and the RGB colors
                    # for each row of the file.
                    point_cloud.save_to_disk(
                        os.path.join(output_folder,
                                     '{:0>5}.ply'.format(frame)))
                    point_cloud_seg.save_to_disk(
                        os.path.join(output_folder,
                                     '{:0>5}_seg.ply'.format(frame)))

                    print_message(timer.milliseconds(), len(point_cloud),
                                  frame)

                if autopilot:
                    client.send_control(
                        measurements.player_measurements.autopilot_control)
                else:
                    control.hand_brake = True
                    client.send_control(control)
            with open(output_folder + '/cameras.p', 'wb') as camerafile:
                pickle.dump(cameras_dict, camerafile)
                print(output_folder + "/cameras.p")
            with open(output_folder + '/people.p', 'wb') as peoplefile:
                pickle.dump(pedestrians_dict, peoplefile)
            with open(output_folder + '/cars.p', 'wb') as carfile:
                pickle.dump(cars_dict, carfile)
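A hypothetical reader for the files written above (binary mode, matching the 'wb' writes; each pickle maps frame id -> per-agent records built in the loop):

    import pickle
    with open(output_folder + '/cars.p', 'rb') as f:
        cars_dict = pickle.load(f)
    for frame, vehicles in sorted(cars_dict.items()):
        print(frame, len(vehicles), 'vehicles with 2D/3D boxes')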
Example #28
def run_carla_client(args):
    # Episode and frame budget; the episodes actually run are listed below.
    number_of_episodes = 15
    frames_per_episode = 10030
    # Per-weather vehicle counts, indexed by episode/weather id 0-14.
    # vehicles_num = [60, 60, 70, 50, 60, 60, 80, 60, 60, 60, 50, 70, 60, 50, 50]
    vehicles_num = [35, 35, 30, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35]
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        # list_of_episodes = [11]
        list_of_episodes = [9, 11, 14]
        # list_of_episodes = [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 14]
        # list_of_episodes = [6, 7, 9, 11, 14]
        # list_of_episodes = [2, 3, 4, 5, 6, 7, 9, 11, 14]
        # list_of_episodes = [2, 7]
        # list_of_episodes = [5, 6, 7, 11, 14]
        # list_of_episodes = [6, 11]
        # list_of_episodes = [7]
        #list(range(0, number_of_episodes))
        #list_of_episodes.pop(13)
        #list_of_episodes.pop(12)
        #list_of_episodes.pop(10)
        #list_of_episodes.pop(8)

        for episode in list_of_episodes:
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=False,
                    NumberOfVehicles=vehicles_num[episode],
                    NumberOfPedestrians=25,
                    DisableTwoWheeledVehicles=False,
                    WeatherId=episode,  # one weather id per episode
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.
                #### Cameras aligned across the y-axis
                #### Horizontally shifted in the following Range
                # [-1.62, -1.08, -0.54, 0.0, 0.54, 1.08, 1.62]
                # LEFT RGB CAMERA
                # y_locs = [-1.62, -1.08, -0.54, 0.0, 0.54, 1.08, 1.62]
                # x_locs = [1.3, 1.84, 2.38, 2.92, 3.46, 4.0, 4.54]
                # y_locs_left = [-1.08, -0.54, 0.0, 0.54, 1.08]
                y_locs_left = -1.08
                x_locs_left = [1.84, 2.38, 2.92]
                LeftSideCameras = {}
                for i,x_position in enumerate(x_locs_left):
                    # COLOR
                    camera_rgb = Camera('LeftSideCameras{0}RGB'.format(i),
                                     PostProcessing='SceneFinal')
                    camera_rgb.set_image_size(800, 600)
                    camera_rgb.set_position(x_position, y_locs_left, 1.50)
                    camera_rgb.set_rotation(0, -90.0, 0)
                    LeftSideCameras['LeftSideCameras{0}RGB'.format(i)] = camera_rgb
                    settings.add_sensor(camera_rgb)
                    # DEPTH
                    camera_depth = Camera('LeftSideCameras{0}Depth'.format(i),
                                          PostProcessing='Depth')
                    camera_depth.set_image_size(800, 600)
                    camera_depth.set_position(x_position, y_locs_left, 1.50)
                    camera_depth.set_rotation(0, -90.0, 0)
                    LeftSideCameras['LeftSideCameras{0}Depth'.format(i)] = camera_depth
                    settings.add_sensor(camera_depth)
                    # SEGMENTATION
                    camera_seg = Camera('LeftSideCameras{0}Seg'.format(i),
                                       PostProcessing='SemanticSegmentation')
                    camera_seg.set_image_size(800, 600)
                    camera_seg.set_position(x_position, y_locs_left, 1.50)
                    camera_seg.set_rotation(0, -90.0, 0)
                    LeftSideCameras['LeftSideCameras{0}Seg'.format(i)] = camera_seg
                    settings.add_sensor(camera_seg)

                y_locs_right = 1.08
                x_locs_right = [1.84, 2.38, 2.92]
                RightSideCameras = {}
                for i,x_position in enumerate(x_locs_right):
                    # COLOR
                    camera_rgb = Camera('RightSideCameras{0}RGB'.format(i),
                                     PostProcessing='SceneFinal')
                    camera_rgb.set_image_size(800, 600)
                    camera_rgb.set_position(x_position, y_locs_right, 1.50)
                    camera_rgb.set_rotation(0, 90.0, 0)
                    RightSideCameras['RightSideCameras{0}RGB'.format(i)] = camera_rgb
                    settings.add_sensor(camera_rgb)
                    # DEPTH
                    camera_depth = Camera('RightSideCameras{0}Depth'.format(i),
                                          PostProcessing='Depth')
                    camera_depth.set_image_size(800, 600)
                    camera_depth.set_position(x_position, y_locs_right, 1.50)
                    camera_depth.set_rotation(0, 90.0, 0)
                    RightSideCameras['RightSideCameras{0}Depth'.format(i)] = camera_depth
                    settings.add_sensor(camera_depth)
                    # SEGMENTATION
                    camera_seg = Camera('RightSideCameras{0}Seg'.format(i),
                                       PostProcessing='SemanticSegmentation')
                    camera_seg.set_image_size(800, 600)
                    camera_seg.set_position(x_position, y_locs_right, 1.50)
                    camera_seg.set_rotation(0, 90.0, 0)
                    RightSideCameras['RightSideCameras{0}Seg'.format(i)] = camera_seg
                    settings.add_sensor(camera_seg)

            else:
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()
            scene = client.load_settings(settings)
            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))
            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)
            LeftSideCameras_to_car = []
            for i in range(len(x_locs_left)):
                LeftSideCameras_to_car.append(LeftSideCameras['LeftSideCameras{0}RGB'.format(i)].get_transform())
            RightSideCameras_to_car = []
            for i in range(len(x_locs_right)):
                RightSideCameras_to_car.append(RightSideCameras['RightSideCameras{0}RGB'.format(i)].get_transform())
            # camera_90_p_l_to_car_transform = camera_90_p_l.get_transform()
            # camera_90_p_r_to_car_transform = camera_90_p_r.get_transform()
            # Create a folder for saving episode data
            if not os.path.isdir("/data/teddy/Datasets/carla_left_and_right/Town2/episode_{:0>5d}".format(episode)):
                os.makedirs("/data/teddy/Datasets/carla_left_and_right/Town2/episode_{:0>5d}".format(episode))

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()
                # player_measurements = measurements.player_measurements
                world_transform = Transform(measurements.player_measurements.transform)
                # Compute the final transformation matrix.
                LeftSideCameras_to_world = [world_transform*cam_to_car \
                                        for cam_to_car in LeftSideCameras_to_car]
                RightSideCameras_to_world = [world_transform*cam_to_car \
                                        for cam_to_car in RightSideCameras_to_car]

                # for i in range(len()):
                #     LeftSideCameras_to_world.append()
                #     RightSideCameras_to_world.append(world_transform * RightSideCameras_to_car[i])
                # Save the images to disk if requested.
                if frame >= 30 and (frame % 2 == 0):
                    if args.save_images_to_disk:
                        for name, measurement in sensor_data.items():
                            filename = args.out_filename_format.format(episode, name, (frame - 30) // 2)
                            measurement.save_to_disk(filename)
                        # Save Transform matrix of each camera to separated files
                        for cam_num in range(len(x_locs_left)):
                            line = ""
                            filename = "{}episode_{:0>5d}/LeftSideCameras{}".format(args.root_path, episode, cam_num) + ".txt"
                            with open(filename, 'a+') as myfile:
                                for x in np.asarray(LeftSideCameras_to_world[cam_num].matrix[:3, :]).reshape(-1):
                                    line += "{:.8e} ".format(x)
                                line = line[:-1]
                                line += "\n"
                                myfile.write(line)
                                line = ""
                        # Right-side cameras (indexed like the left side).
                        for cam_num in range(len(x_locs_right)):
                            line = ""
                            filename = "{}episode_{:0>5d}/RightSideCameras{}".format(args.root_path, episode, cam_num) + ".txt"
                            with open(filename, 'a+') as myfile:
                                for x in np.asarray(RightSideCameras_to_world[cam_num].matrix[:3, :]).reshape(-1):
                                    line += "{:.8e} ".format(x)
                                line = line[:-1]
                                line += "\n"
                                myfile.write(line)
                                line = ""
                if not args.autopilot:
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)
                else:
                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.
                    control = measurements.player_measurements.autopilot_control
                    #control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
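A hypothetical parser for the per-camera extrinsics files written above; each line holds the top 3 rows of a 4x4 camera-to-world matrix flattened to 12 values:

    import numpy as np

    def load_extrinsics(path):
        with open(path) as f:
            rows = [np.array(line.split(), dtype=float).reshape(3, 4) for line in f]
        return np.stack(rows)  # (num_saved_frames, 3, 4)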
Example #29
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraMiddle')
        camera.set(FOV=90)
        camera.set_image_size(768, 576)
        camera.set_position(1.5, 0.0, 1.6)
        camera.set_rotation(0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 100]
            pedestrians_tasks = [0, 0, 0, 300]
            n_samples = [0, 0, 30 // 4, 60 // 4]
            #n_samples = [3, 6, 6, 9]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 50]
            pedestrians_tasks = [0, 0, 0, 150]
            n_samples = [0, 0, 15, len(poses_tasks[-1])]
            #n_samples = [3, 6, 6, 9]

        experiments_vector = []

        random.seed(1)

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                nsample = n_samples[iteration]
                if nsample == 0:
                    continue
                poses = random.sample(poses, nsample)

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather,
                               QualityLevel="Low")
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
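Because the seed is fixed above, random.sample draws the same pose subset on every run; a minimal illustration of that property:

    import random
    random.seed(1)
    first = random.sample(range(100), 5)
    random.seed(1)
    assert first == random.sample(range(100), 5)  # reproducible pose selection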
Example #30
def run_carla_client(args):
    # Episode and frame budget; the episodes actually run are listed below.
    number_of_episodes = 15
    frames_per_episode = 10050
    # Per-weather vehicle counts, indexed by episode/weather id 0-14.
    # vehicles_num = [60, 60, 70, 50, 60, 60, 80, 60, 60, 60, 50, 70, 60, 50, 50]
    vehicles_num = [35, 35, 30, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35]
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        # list_of_episodes = [11]
        # list_of_episodes = [9, 11, 14]
        list_of_episodes = [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 14]
        # list_of_episodes = [6, 7, 9, 11, 14]
        # list_of_episodes = [2, 3, 4, 5, 6, 7, 9, 11, 14]
        # list_of_episodes = [2, 7]
        # list_of_episodes = [5, 6, 7, 11, 14]
        # list_of_episodes = [6, 11]
        # list_of_episodes = [7]
        #list(range(0, number_of_episodes))
        #list_of_episodes.pop(13)
        #list_of_episodes.pop(12)
        #list_of_episodes.pop(10)
        #list_of_episodes.pop(8)

        for episode in list_of_episodes:
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=False,
                    NumberOfVehicles=vehicles_num[episode],
                    NumberOfPedestrians=4,
                    DisableTwoWheeledVehicles=False,
                    WeatherId=episode,  # one weather id per episode
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.
                #### Cameras aligned across the y-axis
                #### Horizontally shifted in the following Range
                # [-1.62, -1.08, -0.54, 0.0, 0.54, 1.08, 1.62]
                # LEFT RGB CAMERA
                # y_locs = [-1.62, -1.08, -0.54, 0.0, 0.54, 1.08, 1.62]
                # z_locs = [1.3, 1.84, 2.38, 2.92, 3.46, 4.0, 4.54]
                # y_locs_left = [-1.08, -0.54, 0.0, 0.54, 1.08]
                # 15 offsets from -3.5 m to +3.5 m in 0.5 m steps.
                z_locs = [i.item() - 7 * 0.5 for i in np.linspace(0, 14 * 0.5, 15)]  # up
                y_locs = [i.item() - 7 * 0.5 for i in np.linspace(0, 14 * 0.5, 15)]  # right
                rotation_y = 0
                CameraArray = {}
                for j, y_position in enumerate(y_locs):
                    for i, z_position in enumerate(z_locs):
                        # COLOR
                        camera_rgb = Camera('RGB_{:0>2d}_{:0>2d}'.format(j, i),
                                            PostProcessing='SceneFinal')
                        camera_rgb.set_image_size(800, 600)
                        camera_rgb.set_position(2.0, y_position,
                                                2.00 - z_position)
                        camera_rgb.set_rotation(0, rotation_y, 0)
                        CameraArray['RGB_{:0>2d}_{:0>2d}'.format(
                            j, i)] = camera_rgb
                        settings.add_sensor(camera_rgb)
                        # DEPTH
                        camera_depth = Camera('Depth_{:0>2d}_{:0>2d}'.format(
                            j, i),
                                              PostProcessing='Depth')
                        camera_depth.set_image_size(800, 600)
                        camera_depth.set_position(2.0, y_position,
                                                  2.0 - z_position)
                        camera_depth.set_rotation(0, rotation_y, 0)
                        CameraArray['Depth_{:0>2d}_{:0>2d}'.format(
                            j, i)] = camera_depth
                        settings.add_sensor(camera_depth)
                        # SEGMENTATION
                        camera_seg = Camera(
                            'SEM_{:0>2d}_{:0>2d}'.format(j, i),
                            PostProcessing='SemanticSegmentation')
                        camera_seg.set_image_size(800, 600)
                        camera_seg.set_position(2.0, y_position,
                                                2.0 - z_position)
                        camera_seg.set_rotation(0, rotation_y, 0)
                        CameraArray['SEM_{:0>2d}_{:0>2d}'.format(
                            j, i)] = camera_seg
                        settings.add_sensor(camera_seg)
            else:
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()
            scene = client.load_settings(settings)
            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))
            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)
            print('Created a new episode...')
            Cameras_to_car = []
            for j in range(len(y_locs)):
                for i in range(len(z_locs)):
                    Cameras_to_car.append(
                        CameraArray['RGB_{:0>2d}_{:0>2d}'.format(
                            j, i)].get_transform())
            print('created a camera array')
            # Create a folder for saving episode data
            episode_dir = "{}/Town{:0>2d}/episode_{:0>5d}".format(
                args.data_path, args.town_id, episode)
            if not os.path.isdir(episode_dir):
                os.makedirs(episode_dir)
            print('created destination folder')
            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):
                # Read the data produced by the server this frame.
                time.sleep(1)
                measurements, sensor_data = client.read_data()
                print('got measurements')
                # player_measurements = measurements.player_measurements
                world_transform = Transform(
                    measurements.player_measurements.transform)
                # Compute the final transformation matrix.
                print('debug point 1')
                Cameras_to_world = [world_transform*cam_to_car \
                                        for cam_to_car in Cameras_to_car]
                if frame >= 50 and (frame % 100 == 0):
                    print('debug point 2')
                    if args.save_images_to_disk:
                        output_dir = os.path.join(
                            episode_dir, "{:0>6d}".format((frame - 50) // 100))
                        if not os.path.isdir(output_dir):
                            os.makedirs(output_dir)
                        for name, measurement in sensor_data.items():
                            # filename = args.out_filename_format.format(episode, name, (frame-50)/100)
                            filename = os.path.join(output_dir, name)
                            print('writing:', filename)
                            measurement.save_to_disk(filename)
                        cam_2_world_file = os.path.join(
                            output_dir, 'cam_2_world.txt')
                        with open(cam_2_world_file, 'w') as myfile:
                            for cam_num in range(len(Cameras_to_world)):
                                line = ""
                                for x in np.asarray(Cameras_to_world[cam_num].
                                                    matrix[:3, :]).reshape(-1):
                                    line += "{:.8e} ".format(x)
                                line = line[:-1]
                                line += "\n"
                                myfile.write(line)
                if not args.autopilot:
                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)
                else:
                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.
                    control = measurements.player_measurements.autopilot_control
                    #control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
Example #31
def run_carla_client(args, classifier, plt, plt_index):
    global global_steering_indicator
    # Run one episode at a fixed start position, or three random ones, with
    # args.frames frames each.
    frames_per_episode = args.frames
    
    if args.position:
      number_of_episodes = 1
    else:
      number_of_episodes = 3

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,
                    NumberOfPedestrians=0,
                    WeatherId=2,  # single fixed weather
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # # The default camera captures RGB images of the scene.
                # camera0 = Camera('CameraRGB')
                # # Set image resolution in pixels.
                # camera0.set_image_size(256, 256)
                # # Set its position relative to the car in meters.
                # camera0.set_position(2.2, 0, 1.30)
                # settings.add_sensor(camera0)

                if args.lidar:
                    # Adding a lidar sensor
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=args.lidar_pps,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)
                else:
                    # Let's add another camera producing ground-truth depth.
                    camera1 = Camera('CameraDepth', PostProcessing='Depth')
                    camera1.set_image_size(256, 256)
                    camera1.set_position(2.2, 0, 1.30)
                    camera1.set(FOV=90.0)
                    #camera1.set_rotation(pitch=-8, yaw=0, roll=0)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraSeg', PostProcessing='SemanticSegmentation')
                    camera2.set_image_size(256, 256)
                    camera2.set_position(2.2, 0, 1.30)
                    camera2.set(FOV=90.0)
                    #camera2.set_rotation(pitch=-8, yaw=0, roll=0)
                    settings.add_sensor(camera2)

                if args.capture:
                  camera3 = Camera('CameraRGB')
                  camera3.set_image_size(512, 256)
                  camera3.set_position(-8, 0, 5)
                  camera3.set(FOV=90.0)
                  camera3.set_rotation(pitch=-20, yaw=0, roll=0)
                  settings.add_sensor(camera3)


            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            if args.position:
              player_start = args.position
            else:
              player_start = random.choice([42,67,69,79,94,97, 70, 44,85,102])
            

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)

            """ Begin added code """
            if args.capture:
              directory = '_capture/pos{}'.format(player_start)
              if not os.path.exists(directory):
                  os.makedirs(directory)
              print("Capturing point clouds to {}".format(directory))


            """ End added code """

            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                """ Begin added code """
                # dict of steering directions of available curves
                # [1,0] if the next curve will be a left one
                # [0,1] if the next curve will be a right one
                directions = {
                  67: [[1,0]], # straight
                  42: [[1,0]], # straight or left
                  69: [[0,1]], # right
                  79: [[0,1]], # right
                  94: [[0,1]], # right
                  97: [[0,1]], # right
                  70: [[1,0]], # left
                  44: [[1,0]], # left
                  85: [[1,0]], # left
                  102: [[1,0]] # left
                }

                # Dynamically set the global steering indicator during the game.
                if args.key_control:
                  set_steering_indicator_from_keypress()

                steering_direction = args.steering_indicator or global_steering_indicator

                if args.use_steering_indicator:
                  if steering_direction == "left":
                    steering_indicator = [[1,0]]
                  elif steering_direction == "right":
                    steering_indicator = [[0,1]]
                  else:
                    steering_indicator = [[0,0]]
                  steering_indicator = torch.Tensor(steering_indicator)
                  if cuda_available:
                    steering_indicator = steering_indicator.cuda()

                if args.lidar:
                    point_cloud = sensor_data['Lidar32'].data
                    if args.lidar_fov == 180:
                      print("FOV 180")
                      print("Before", point_cloud.shape)
                      point_cloud = filter_fov(point_cloud)
                      print("After", point_cloud.shape)


                else:
                    # Get depth and seg as numpy array for further processing
                    depth_obj = sensor_data['CameraDepth']
                    depth = depth_obj.data
                    fov = depth_obj.fov
                    
                    # Filter seg to get intersection points
                    seg = sensor_data['CameraSeg'].data
                    filtered_seg = pp.filter_seg_image(seg)

                    # Add following lines to measure performance of generating pointcloud
                    # def f():
                    #   return pp.depth_to_local_point_cloud(depth, fov, filtered_seg)
                    # print(timeit.timeit(f, number=1000) / 1000)

                    # Create pointcloud from seg and depth (but only take intersection points)
                    point_cloud = pp.depth_to_local_point_cloud(depth, fov, filtered_seg)

                # Save point cloud for later visualization
                if args.capture:
                  pp.save_to_disk(point_cloud, "{}/point_clouds/point_cloud_{:0>5d}.ply".format(directory, frame))
                  sensor_data['CameraRGB'].save_to_disk('{}/images/image_{:0>5d}.png'.format(directory, frame))
                

                # Normalizing the point cloud
                if not args.no_center or not args.no_norm:
                  point_cloud = point_cloud - np.expand_dims(np.mean(point_cloud, axis=0), 0)  # center
                
                if not args.no_norm:
                  dist = np.max(np.sqrt(np.sum(point_cloud ** 2, axis=1)), 0)
                  point_cloud = point_cloud / dist  # scale

                #pp.save_point_cloud(point_cloud, 'test')
                # pp.save_seg_image(filtered_seg)
                """ End added code """
                # Print some of the measurements.
                #print_measurements(measurements)

                # # Save the images to disk if requested.
                # if args.save_images_to_disk:
                #     for name, measurement in sensor_data.items():
                #         filename = args.out_filename_format.format(episode, name, frame)
                #         measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.
                
                if not args.autopilot:
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.


                    """ Beginn added code """
                    control = measurements.player_measurements.autopilot_control
                    #steer = control.steer
                    #print(control)

                    point_cloud = point_cloud.transpose()
                    points = np.expand_dims(point_cloud, axis=0)
                    points = torch.from_numpy(points.astype(np.float32))
                    #print(points)
                    if cuda_available:
                      points = points.cuda()
                    classifier = classifier.eval()

                    if args.use_steering_indicator:
                      #print("Using PointNetReg2")
                      steer, _, _ = classifier((points, steering_indicator))
                    else:
                      #print("Using PointNetReg")
                      steer, _, _ = classifier(points)
                    steer = steer.item()
                    if args.use_steering_indicator:
                      print("Using steering indicator: {} / {}".format(steering_direction, steering_indicator))
                    print("Autopilot steer: ", control.steer)
                    print("Pointnet steer: ", steer)
                    print("Relative difference: ", steer / control.steer if control.steer != 0.0 else 'NaN')
                    print("Absolute difference: ", control.steer - steer)
                    print("FOV:", args.lidar_fov)

                    # Plot some visualizations to visdom
                    if args.visdom:
                      plt.plot_point_cloud(points, var_name='pointcloud')
                      plt.plot('steering', 'PointNet', 'Steering', plt_index, steer)
                      plt.plot('steering', 'Autopilot', 'Steering', plt_index, control.steer)
                      plt_index += 1

                    # Let pointnet take over steering control
                    if True:  # set to False to keep the autopilot's steer instead
                      print("Who's in command: POINTNET")
                      control.steer = steer
                      if args.ignore_red_lights or args.use_steering_indicator:
                        control.throttle = 0.5
                        control.brake = 0.0
                        control.hand_brake = False

                    else:
                      print("Who's in command: AUTOPILOT")

                    print("_________")
                    #pdb.set_trace()
                    """ End added code """
                    client.send_control(control)
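A side note on the preprocessing used in this example: before inference, the point cloud is centered at the origin and scaled into the unit sphere, which is the standard PointNet input normalization. A self-contained sketch of the same transform (the function name is illustrative, not part of the original script):

import numpy as np

def normalize_point_cloud(point_cloud):
    """Center an (N, 3) cloud and scale it into the unit sphere."""
    centered = point_cloud - point_cloud.mean(axis=0, keepdims=True)
    # The farthest point from the origin defines the scale factor.
    dist = np.sqrt((centered ** 2).sum(axis=1)).max()
    return centered / dist if dist > 0 else centered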
Example #32
0
def run_carla_client(args):
    # Run 5 episodes, each split into 40 cuts of 200 frames.
    number_of_episodes = 5
    cut_per_episode = 40
    frames_per_cut = 200

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        for episode in range(0, number_of_episodes):
            print("input any key to continue...")
            start = input()
            # each episode dir store a set of traindata.  if dir not existed, then make it
            pathdir = '/home/kadn/dataTrain/episode_{:0>3}'.format(episode)
            mkdir(pathdir)

            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=20,
                NumberOfPedestrians=40,
                # WeatherId=random.choice([1, 3, 7, 8, 14]),
                WeatherId=1,
                QualityLevel=args.quality_level)
            settings.randomize_seeds()
            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(88, 200)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            camera1 = Camera('CameraDepth', PostProcessing='Depth')
            camera1.set_image_size(200, 88)
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

            camera2 = Camera('CameraSemSeg', PostProcessing='SemanticSegmentation')
            camera2.set_image_size(88, 200)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera2)
            # if args.lidar:
            lidar = Lidar('Lidar32')
            lidar.set_position(0, 0, 2.50)
            lidar.set_rotation(0, 0, 0)
            lidar.set(
                Channels=0,
                Range=30,
                PointsPerSecond=200000,
                RotationFrequency=10,
                UpperFovLimit=0,
                LowerFovLimit=0)
            settings.add_sensor(lidar)

            # else:
            #
            #     # Alternatively, we can load these settings from a file.
            #     with open(args.settings_filepath, 'r') as fp:
            #         settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))
            player_start = 1

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            # Start a new episode.
            client.start_episode(player_start)

            for cut in range(cut_per_episode):  # don't shadow the cut count
                num = fileCounter(pathdir)
                filename = "data_{:0>6}.h5".format(num)
                filepath = pathdir + '/' + filename
                f = h5py.File(filepath, "w")
                rgb_file = f.create_dataset("rgb", (200, 88, 200), np.uint8)
                seg_file = f.create_dataset("seg", (200, 88, 200), np.uint8)
                lidar_file = f.create_dataset('lidar',(200,200,200),np.uint8)
                startendpoint = f.create_dataset('startend',(1,2),np.float32)
                targets_file = f.create_dataset("targets", (200, 28), np.float32)
                index_file = 0

                # Iterate every frame in the episode.
                for frame in range(0, frames_per_cut):
                    # Read the data produced by the server this frame.
                    measurements, sensor_data = client.read_data()

                    # get data and store
                    sensors, targets_data = record_train_data(measurements,sensor_data)
                    rgb_file[:,:,index_file] = sensors['rgb']
                    seg_file[:,:,index_file] = sensors['seg']
                    lidar_file[:,:,index_file] = sensors['lidar']
                    targets_file[index_file,:] = targets_data
                    index_file += 1

                    # We can access the encoded data of a given image as numpy
                    # array using its "data" property. For instance, to get the
                    # depth value (normalized) at pixel X, Y
                    #
                    #     depth_array = sensor_data['CameraDepth'].data
                    #     value_at_pixel = depth_array[Y, X]
                    #

                    # Now we have to send the instructions to control the vehicle.
                    # If we are in synchronous mode the server will pause the
                    # simulation until we send this control.

                    if args.autopilot:

                        client.send_control(
                            steer=0,
                            throttle=0.8,
                            brake=0.0,
                            hand_brake=False,
                            reverse=False)

                    else:

                        # Together with the measurements, the server has sent the
                        # control that the in-game autopilot would do this frame. We
                        # can enable autopilot by sending back this control to the
                        # server. We can modify it if wanted, here for instance we
                        # will add some noise to the steer.

                        control = measurements.player_measurements.autopilot_control
                        control.steer += random.uniform(-0.05, 0.05)
                        client.send_control(control)
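The cuts written above can be read back with h5py for training. A minimal sketch, using the dataset names and shapes from the create_dataset calls in this example (the file path is a placeholder):

import h5py
import numpy as np

def load_cut(filepath):
    """Load one recorded cut of 200 frames from an HDF5 file."""
    with h5py.File(filepath, 'r') as f:
        rgb = np.array(f['rgb'])            # (200, 88, 200), frame index last
        seg = np.array(f['seg'])            # (200, 88, 200)
        lidar = np.array(f['lidar'])        # (200, 200, 200)
        targets = np.array(f['targets'])    # (200, 28), one row per frame
        startend = np.array(f['startend'])  # (1, 2)
    return rgb, seg, lidar, targets, startend

# e.g. rgb, seg, lidar, targets, _ = load_cut('data_000000.h5'); the first frame is rgb[:, :, 0]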
Example #33
0
def run_carla_client(args):
    skip_frames = 100  # at 10 fps
    number_of_episodes = args.n_episode
    frames_per_episode = args.n_frame

    # n_weather = 14 # weathers starts from 1
    # n_player_start_spots = 152 # Town01
    n_player_start_spots = 83  # Town02

    weathers = number_of_episodes * [2]  # CloudyNoon
    start_spots = list(range(n_player_start_spots))
    random.shuffle(start_spots)
    assert number_of_episodes < n_player_start_spots
    start_spots = start_spots[:number_of_episodes]

    # weathers = list(range(number_of_episodes))
    # # random.shuffle(weathers)
    # weathers = [w % 14 + 1 for w in weathers]
    # https://carla.readthedocs.io/en/latest/carla_settings/

    # number_of_episodes = n_weather*n_player_start_spots
    # frames_per_episode = args.n_frame

    # weathers, start_spots = np.meshgrid(list(range(1, n_weather+1)), list(range(n_player_start_spots)))

    # weathers = weathers.flatten()
    # start_spots = start_spots.flatten()

    if not os.path.isdir(args.out_dir):
        os.makedirs(args.out_dir)
    np.savetxt(join(args.out_dir, 'weathers.txt'), weathers, fmt='%d')
    np.savetxt(join(args.out_dir, 'start-spots.txt'), start_spots, fmt='%d')
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=weathers[episode],
                    # WeatherId=random.randrange(14) + 1,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('RGB')
                # Set image resolution in pixels.
                camera0.set(FOV=args.cam_fov)
                camera0.set_image_size(args.x_res, args.y_res)
                # Set its position relative to the car in meters.
                camera0.set_position(*args.cam_offset)
                camera0.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('Depth', PostProcessing='Depth')
                camera1.set(FOV=args.cam_fov)
                camera1.set_image_size(args.x_res, args.y_res)
                camera1.set_position(*args.cam_offset)
                camera1.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera1)

                camera2 = Camera('SegRaw',
                                 PostProcessing='SemanticSegmentation')
                camera2.set(FOV=args.cam_fov)
                camera2.set_image_size(args.x_res, args.y_res)
                camera2.set_position(*args.cam_offset)
                camera2.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randrange(number_of_player_starts)

            player_start = start_spots[episode]

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            frame = 0
            save_frame_idx = 0

            # extra_frames = 0
            # last_control = None
            # last_control_changed = 0
            # while frame < skip_frames + frames_per_episode + extra_frames:

            # Iterate every frame in the episode.
            for frame in range(skip_frames + frames_per_episode):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk and frame >= skip_frames \
                    and (frame - skip_frames) % args.save_every_n_frames == 0:
                    # if last_control_changed < frame - args.save_every_n_frames:
                    #     extra_frames += args.save_every_n_frames
                    #     last_control_changed += args.save_every_n_frames
                    # else:
                    save_frame_idx += 1
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode + 1, name, save_frame_idx)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # if last_control:
                    #     for v1, v2 in zip(control.values(), last_control.values()):
                    #         if v1 != v2:
                    #             last_control_changed = frame
                    #             break

                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
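print_measurements is called above but not defined in this snippet. A condensed sketch in the spirit of the helper from CARLA 0.8's client_example.py, assuming the usual player_measurements fields:

def print_measurements(measurements):
    pm = measurements.player_measurements
    print('pos ({:.1f}, {:.1f}) m, {:.0f} km/h, '
          'collisions: veh={:.0f} ped={:.0f} other={:.0f}'.format(
              pm.transform.location.x,
              pm.transform.location.y,
              pm.forward_speed * 3.6,  # forward_speed is in m/s
              pm.collision_vehicles,
              pm.collision_pedestrians,
              pm.collision_other))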
Example #34
0
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set(FOV=120)
            camera1.set_position(2.0, 0.0, 1.4)
            camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        # camera2.set_position(30, 0, 130)
        # camera2.set_position(0.3, 0.0, 1.3)
        camera2.set(FOV=120)
        camera2.set_position(2.0, 0.0, 1.4)
        camera2.set_rotation(0.0, 0.0, 0.0)

        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)
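preprocess_image and encode_obs are environment methods not shown in this snippet. A minimal sketch of what preprocess_image could look like, assuming the config also carries target "x_res"/"y_res" keys and that the image arrives as an HxWxC uint8 numpy array (both assumptions, not confirmed by the original):

import cv2
import numpy as np

def preprocess_image(self, image):
    # Downscale the render-resolution frame to the observation resolution
    # and map pixel values roughly into [-1, 1].
    data = cv2.resize(image, (self.config["x_res"], self.config["y_res"]),
                      interpolation=cv2.INTER_AREA)
    return (data.astype(np.float32) - 128.0) / 128.0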