Example #1
    def build_experiments(self):
        """
            Creates the whole set of experiment objects,
            The experiments created depends on the selected Town.

        """

        # We check the town and define the town-related parameters accordingly.
        # The size of each vector matches the number of tasks; each task
        # contains one or more poses (start and end positions).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather
                )
                # Add all the cameras that were set for this experiment
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #2
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather
                )
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #3
    def _build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)

        camera.set_image_size(800, 600)

        camera.set_position(200, 0, 140)
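        # Note: this example places the camera in what looks like centimetres
        # (200, 0, 140); most other examples here use metres (2.0, 0.0, 1.4).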
        camera.set_rotation(-15.0, 0, 0)

        weathers = [1, 3, 6, 8, 4, 14]
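        # These WeatherId values map to: 1 ClearNoon, 3 WetNoon, 6 HardRainNoon,
        # 8 ClearSunset, 4 WetCloudyNoon, 14 SoftRainSunset.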
        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []

        for weather in weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SynchronousMode=True,
                               SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather,
                               SeedVehicles=123456789,
                               SeedPedestrians=123456789)
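                # SynchronousMode makes the server wait for a client control
                # message each frame; the fixed seeds keep non-player vehicle
                # and pedestrian spawning reproducible across runs.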
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Id=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #4
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')

        camera.set(FOV=90)
        camera.set_image_size(800, 600)
        camera.set_position(1.44, 0.0, 1.2)
        camera.set_rotation(0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 20, 100]
            pedestrians_tasks = [0, 50, 250]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 15, 70]
            pedestrians_tasks = [0, 50, 150]

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)

                conditions.set(DisableTwoWheeledVehicles=True)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #5
    def build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [25]
            pedestrians_tasks = [25]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 25]
            pedestrians_tasks = [0, 25]

        experiments_vector = []
        for weather in self.weathers:
            # Cross every traffic scenario with every pose set: the poses come
            # from the task index, while the vehicle and pedestrian counts come
            # from the scenario index.
            for scenario in range(len(vehicles_tasks)):
                for iteration in range(len(poses_tasks)):
                    poses = poses_tasks[iteration]
                    vehicles = vehicles_tasks[scenario]
                    pedestrians = pedestrians_tasks[scenario]

                    conditions = CarlaSettings()
                    conditions.set(SendNonPlayerAgentsInfo=True,
                                   NumberOfVehicles=vehicles,
                                   NumberOfPedestrians=pedestrians,
                                   WeatherId=weather)
                    # Add all the cameras that were set for this experiment

                    conditions.add_sensor(camera)

                    experiment = Experiment()
                    experiment.set(Conditions=conditions,
                                   Poses=poses,
                                   Task=iteration,
                                   Repetitions=1)
                    experiments_vector.append(experiment)

        return experiments_vector
Example #6
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('rgb')
        camera.set(FOV=90)
        camera.set_image_size(300, 300)
        camera.set_position(0.2, 0.0, 0.85)
        camera.set_rotation(-3.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [0, 0, 0, 15]
        pedestrians_tasks = [0, 0, 0, 50]



        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather,
                    QualityLevel='Epic'
                )
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #7
    def build_experiments(self):
        """
            Creates the whole set of experiment objects,
            The experiments created depends on the selected Town.

        """

        # We check the town and define the town-related parameters accordingly.
        # The size of each vector matches the number of tasks; each task
        # contains one or more poses (start and end positions).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 150]], [[138, 17]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiment
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #8
    def __init__(self,
                 client,
                 frame_skip=1,
                 cam_width=800,
                 cam_height=600,
                 town_string='Town01'):
        super(StraightDriveEnv, self).__init__()
        self.frame_skip = frame_skip
        self.client = client
        self._planner = Planner(town_string)

        camera0 = Camera('CameraRGB')
        camera0.set(CameraFOV=100)
        camera0.set_image_size(cam_height, cam_width)
        camera0.set_position(200, 0, 140)
        camera0.set_rotation(-15.0, 0, 0)

        self.start_goal_pairs = [[36, 40], [39, 35], [110, 114], [7, 3],
                                 [0, 4], [68, 50], [61, 59], [47, 64],
                                 [147, 90], [33, 87], [26, 19], [80, 76],
                                 [45, 49], [55, 44], [29, 107], [95, 104],
                                 [84, 34], [53, 67], [22, 17], [91, 148],
                                 [20, 107], [78, 70], [95, 102], [68, 44],
                                 [45, 69]]

        vehicles = 0
        pedestrians = 0
        weather = 1

        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=vehicles,
            NumberOfPedestrians=pedestrians,
            WeatherId=weather,
        )

        settings.randomize_seeds()
        settings.add_sensor(camera0)

        self.scene = self.client.load_settings(settings)

        img_shape = (cam_width, cam_height, 3)
        self.observation_space = spaces.Tuple(
            (spaces.Box(-np.inf, np.inf, (3, )), spaces.Box(0, 255,
                                                            img_shape)))
        self.action_space = spaces.Box(-np.inf, np.inf, shape=(3, ))
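        # Observation: a 3-vector of scalar state plus an RGB image of
        # img_shape; action: an unbounded 3-vector (presumably
        # steer/throttle/brake, though the mapping is not shown here).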

        self.prev_state = np.array([0., 0., 0.])
        self.prev_collisions = np.array([0., 0., 0.])
        self.prev_intersections = np.array([0., 0.])
Example #9
    def build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('rgb')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [0, 0, 0, 20]
        pedestrians_tasks = [0, 0, 0, 50]

        task_names = ['straight', 'one_curve', 'navigation', 'navigation_dyn']

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                conditions.set(DisableTwoWheeledVehicles=True)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               TaskName=task_names[iteration],
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #10
    def add_cameras(self, settings, cameras, camera_width, camera_height):
        # add a front facing camera
        print("Available Cameras are ", cameras)
        if CameraTypes.FRONT in cameras:
            camera = Camera(CameraTypes.FRONT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 0, 0)
            settings.add_sensor(camera)

        # add a left facing camera
        if CameraTypes.LEFT in cameras:
            camera = Camera(CameraTypes.LEFT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, -30, 0)
            settings.add_sensor(camera)

        # add a right facing camera
        if CameraTypes.RIGHT in cameras:
            camera = Camera(CameraTypes.RIGHT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 30, 0)
            settings.add_sensor(camera)
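        # The three RGB cameras above share FOV and mounting position and
        # differ only in yaw (-30 / 0 / +30 degrees), all pitched 15 degrees down.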

        # add a front facing depth camera
        if CameraTypes.DEPTH in cameras:
            camera = Camera(CameraTypes.DEPTH.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'Depth'
            settings.add_sensor(camera)

        # add a front facing semantic segmentation camera
        if CameraTypes.SEGMENTATION in cameras:
            camera = Camera(CameraTypes.SEGMENTATION.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'SemanticSegmentation'
            settings.add_sensor(camera)
            print("Successfully adding a SemanticSegmentation camera")

        return settings
Example #11
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        self.pre_intersection = np.array([0.0, 0.0])

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_sensor"] == 'use_semantic':
            camera0 = Camera("CameraSemantic",
                             PostProcessing="SemanticSegmentation")
            camera0.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera0.set_position(30, 0, 130)
            camera0.set(FOV=120)
            camera0.set_position(2.0, 0.0, 1.4)
            camera0.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera0)

        if self.config["use_sensor"] in ['use_depth', 'use_logdepth']:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set(FOV=120)
            camera1.set_position(2.0, 0.0, 1.4)
            camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        if self.config["use_sensor"] == 'use_rgb':
            camera2 = Camera("CameraRGB")
            camera2.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera2.set_position(30, 0, 130)
            # camera2.set_position(0.3, 0.0, 1.3)
            camera2.set(FOV=120)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera2)

        if self.config["use_sensor"] == 'use_2rgb':
            camera_l = Camera("CameraRGB_L")
            camera_l.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_l.set(FOV=120)
            camera_l.set_position(2.0, -0.1, 1.4)
            camera_l.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_l)

            camera_r = Camera("CameraRGB_R")
            camera_r.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_r.set(FOV=120)
            camera_r.set_position(2.0, 0.1, 1.4)
            camera_r.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_r)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        self.positions = positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]

        self.pre_pos = self.start_pos.location
        self.cnt1 = 0
        self.displacement = 1000.0

        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 1, self.start_pos.location.y // 1
        ]
        self.end_coord = [
            self.end_pos.location.x // 1, self.end_pos.location.y // 1
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Wait out the initial vehicle drop when a new episode starts: send
        # zero controls until the z coordinate stops changing over a few frames.
        cnt = 1
        z1 = 0
        # A default VehicleControl() has steer=0, throttle=0 and reverse=False.
        zero_control = VehicleControl()
        while cnt < 3:
            self.client.send_control(zero_control)
            z0 = z1
            z1 = self.client.read_data(
            )[0].player_measurements.transform.location.z
            print(z1)
            if z0 - z1 == 0:
                cnt += 1
        print('Starting new episode done.\n')

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements

        # self.current_heading = self.pre_heading = np.array([py_measurements["x_orient"], py_measurements["y_orient"]])
        # self.angular_speed = 0.0

        self.pre_heading_degree = self.current_heading_degree = py_measurements[
            "current_heading_degree"]
        self.angular_speed_degree = np.array(0.0)

        return self.encode_obs(self.preprocess_image(image),
                               py_measurements), py_measurements
Example #12
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)
        if self._task == 'go-straight':
            task_select = 0
        elif self._task == 'turn-right':
            task_select = 1
        elif self._task == 'turn-left':
            task_select = 2
        elif self._task == 'go-straight-2':
            task_select = 3
        elif self._task == 'turn-right-2':
            task_select = 4
        elif self._task == 'turn-left-2':
            task_select = 5
        else:
            raise ValueError("Unknown task: {}".format(self._task))

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name == 'Town02':
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name[:-1] == 'Town01_nemesis':
            poses_tasks = [self._poses_town01_nemesis()[task_select]]
            vehicles_tasks = [0, 0, 0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0, 0, 0]
        elif self._city_name[:-1] == 'Town02_nemesis':
            poses_tasks = [self._poses_town02_nemesis()[task_select]]
            vehicles_tasks = [0, 0, 0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0, 0, 0]

        experiments_vector = []
        """repeating experiment"""
        num_iters = self._iters
        weathers_iters = [self._weather] * num_iters

        # 0 - Default
        # 1 - ClearNoon
        # 2 - CloudyNoon
        # 3 - WetNoon
        # 4 - WetCloudyNoon
        # 5 - MidRainyNoon
        # 6 - HardRainNoon
        # 7 - SoftRainNoon
        # 8 - ClearSunset
        # 9 - CloudySunset
        # 10 - WetSunset
        # 11 - WetCloudySunset
        # 12 - MidRainSunset
        # 13 - HardRainSunset
        # 14 - SoftRainSunset

        for weather in weathers_iters:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                # Override the per-task traffic with a fixed, dense scenario
                # (a commented-out variant randomized weather and agent counts
                # instead).
                vehicles = 400
                pedestrians = 400

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #13
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set(FOV=120)
            camera1.set_position(2.0, 0.0, 1.4)
            camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        # camera2.set_position(30, 0, 130)
        # camera2.set_position(0.3, 0.0, 1.3)
        camera2.set(FOV=120)
        camera2.set_position(2.0, 0.0, 1.4)
        camera2.set_rotation(0.0, 0.0, 0.0)

        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)
Example #14
def run_carla_client(args, classifier, plt, plt_index):
    global global_steering_indicator
    # Here we will run 3 episodes with 300 frames each.
    frames_per_episode = args.frames
    
    if args.position:
      number_of_episodes = 1
    else:
      number_of_episodes = 3

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,
                    NumberOfPedestrians=0,
                    WeatherId=random.choice([2]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # # The default camera captures RGB images of the scene.
                # camera0 = Camera('CameraRGB')
                # # Set image resolution in pixels.
                # camera0.set_image_size(256, 256)
                # # Set its position relative to the car in meters.
                # camera0.set_position(2.2, 0, 1.30)
                # settings.add_sensor(camera0)

                if args.lidar:
                    # Adding a lidar sensor
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=args.lidar_pps,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)
                else:
                    # Let's add another camera producing ground-truth depth.
                    camera1 = Camera('CameraDepth', PostProcessing='Depth')
                    camera1.set_image_size(256, 256)
                    camera1.set_position(2.2, 0, 1.30)
                    camera1.set(FOV=90.0)
                    #camera1.set_rotation(pitch=-8, yaw=0, roll=0)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraSeg', PostProcessing='SemanticSegmentation')
                    camera2.set_image_size(256, 256)
                    camera2.set_position(2.2, 0, 1.30)
                    camera2.set(FOV=90.0)
                    #camera2.set_rotation(pitch=-8, yaw=0, roll=0)
                    settings.add_sensor(camera2)

                if args.capture:
                  camera3 = Camera('CameraRGB')
                  camera3.set_image_size(512, 256)
                  camera3.set_position(-8, 0, 5)
                  camera3.set(FOV=90.0)
                  camera3.set_rotation(pitch=-20, yaw=0, roll=0)
                  settings.add_sensor(camera3)


            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            if args.position:
              player_start = args.position
            else:
              player_start = random.choice([42,67,69,79,94,97, 70, 44,85,102])
            

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)

            """ Begin added code """
            if args.capture:
              directory = '_capture/pos{}'.format(player_start)
              if not os.path.exists(directory):
                  os.makedirs(directory)
              print("Capturing point clouds to {}".format(directory))


            """ End added code """

            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                """ Begin added code """
                # dict of steering directions of available curves
                # [1,0] if the next curve will be a left one
                # [0,1] if the next curve will be a right one
                directions = {
                  67: [[1,0]], # straight
                  42: [[1,0]], # straight or left
                  69: [[0,1]], # right
                  79: [[0,1]], # right
                  94: [[0,1]], # right
                  97: [[0,1]], # right
                  70: [[1,0]], # left
                  44: [[1,0]], # left
                  85: [[1,0]], # left
                  102: [[1,0]] # left
                }

                #dynamically set the global steering indicator during the game
                if(args.key_control):
                  set_steering_indicator_from_keypress()

                steering_direction = args.steering_indicator or global_steering_indicator
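                # One-hot turn indicator fed to the network below:
                # [[1, 0]] = left, [[0, 1]] = right, [[0, 0]] = go straight.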

                if args.use_steering_indicator:
                  if steering_direction == "left":
                    steering_indicator = [[1,0]]
                  elif steering_direction == "right":
                    steering_indicator = [[0,1]]
                  else:
                    steering_indicator = [[0,0]]
                  steering_indicator = torch.Tensor(steering_indicator)
                  if cuda_available:
                    steering_indicator = steering_indicator.cuda()

                if args.lidar:
                    point_cloud = sensor_data['Lidar32'].data
                    if args.lidar_fov == 180:
                      print("FOV 180")
                      print("Before", point_cloud.shape)
                      point_cloud = filter_fov(point_cloud)
                      print("After", point_cloud.shape)


                else:
                    # Get depth and seg as numpy array for further processing
                    depth_obj = sensor_data['CameraDepth']
                    depth = depth_obj.data
                    fov = depth_obj.fov
                    
                    # Filter seg to get intersection points
                    seg = sensor_data['CameraSeg'].data
                    filtered_seg = pp.filter_seg_image(seg)

                    # Add following lines to measure performance of generating pointcloud
                    # def f():
                    #   return pp.depth_to_local_point_cloud(depth, fov, filtered_seg)
                    # print(timeit.timeit(f, number=1000) / 1000)

                    # Create pointcloud from seg and depth (but only take intersection points)
                    point_cloud = pp.depth_to_local_point_cloud(depth, fov, filtered_seg)

                # Save point cloud for later visualization
                if args.capture:
                  pp.save_to_disk(point_cloud, "{}/point_clouds/point_cloud_{:0>5d}.ply".format(directory, frame))
                  sensor_data['CameraRGB'].save_to_disk('{}/images/image_{:0>5d}.png'.format(directory, frame))
                

                # Normalizing the point cloud
                if not args.no_center or not args.no_norm:
                  point_cloud = point_cloud - np.expand_dims(np.mean(point_cloud, axis=0), 0)  # center
                
                if not args.no_norm:
                  dist = np.max(np.sqrt(np.sum(point_cloud ** 2, axis=1)), 0)
                  point_cloud = point_cloud / dist  # scale
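                # The two steps above centre the cloud on its centroid and
                # scale it by the largest point norm so it fits in the unit
                # sphere (the usual preprocessing for PointNet-style models).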

                #pp.save_point_cloud(point_cloud, 'test')
                # pp.save_seg_image(filtered_seg)
                """ End added code """
                # Print some of the measurements.
                #print_measurements(measurements)

                # # Save the images to disk if requested.
                # if args.save_images_to_disk:
                #     for name, measurement in sensor_data.items():
                #         filename = args.out_filename_format.format(episode, name, frame)
                #         measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.
                
                if not args.autopilot:
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.


                    """ Beginn added code """
                    control = measurements.player_measurements.autopilot_control
                    #steer = control.steer
                    #print(control)

                    point_cloud = point_cloud.transpose()
                    points = np.expand_dims(point_cloud, axis=0)
                    points = torch.from_numpy(points.astype(np.float32))
                    #print(points)
                    if cuda_available:
                      points = points.cuda()
                    classifier = classifier.eval()

                    if args.use_steering_indicator:
                      #print("Using PointNetReg2")
                      steer, _, _ = classifier((points, steering_indicator))
                    else:
                      #print("Using PointNetReg")
                      steer, _, _ = classifier(points)
                    steer = steer.item()
                    if args.use_steering_indicator:
                      print("Using steering indicator: {} / {}".format(steering_direction, steering_indicator))
                    print("Autopilot steer: ", control.steer)
                    print("Pointnet steer: ", steer)
                    print("Relative difference: ", steer / control.steer if control.steer != 0.0 else 'NaN')
                    print("Absolute difference: ", control.steer - steer)
                    print("FOV:", args.lidar_fov)

                    # Plot some visualizations to visdom
                    if args.visdom:
                      plt.plot_point_cloud(points, var_name='pointcloud')
                      plt.plot('steering', 'PointNet', 'Steering', plt_index, steer)
                      plt.plot('steering', 'Autopilot', 'Steering', plt_index, control.steer)
                      plt_index += 1

                    # Let pointnet take over steering control
                    if True:
                      print("Who's in command: POINTNET")
                      control.steer = steer
                      if args.ignore_red_lights or args.use_steering_indicator:
                        control.throttle = 0.5
                        control.brake = 0.0
                        control.hand_brake = False

                    else:
                      print("Who's in command: AUTOPILOT")

                    print("_________")
                    #pdb.set_trace()
                    """ End added code """
                    client.send_control(control)
Example #15
def run_carla_client(args,
                     number_of_episodes=10,
                     frames_per_episode=500,
                     starting_episode=0):
    with make_carla_client("localhost", 2000) as client:
        print('carla client connected')
        # setting up data type
        imitation_type = np.dtype([('image', np.uint8, (512, 512, 3)),
                                   ('label', np.float32, 5)])
        print("datatype :\n{}".format(imitation_type))
        # total frames collected this run
        total_frames = 0

        for episode in range(starting_episode, number_of_episodes):
            # flag to skip the initial frames where the car doesn't move
            record = False
            # settings
            settings = CarlaSettings()
            if args.settings_filepath is None:
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=1,  # clear noon
                    QualityLevel='Low')  # QualityLevel=args.quality_level
                settings.randomize_seeds()

                camera = Camera('RGBFront', PostProcessing='SceneFinal')
                camera.set_image_size(512, 512)
                camera.set(FOV=120.0)
                # camera.set_position(1.65, 0, 1.30) < OLD
                camera.set_position(2.0, 0, 1.60)
                camera.set_rotation(roll=0, pitch=-10, yaw=0)
                settings.add_sensor(camera)
            else:
                # load settings from file
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # choosing starting position, starting episode
            scene = client.load_settings(settings)
            number_of_player_starts = len(scene.player_start_spots)
            # going through them one by one
            player_start = episode % number_of_player_starts
            print("starting new episode ({})... {} frames saved".format(
                episode, total_frames))
            client.start_episode(player_start)

            # keeping track of the frames that are actually being saved
            frame_index = 0
            # new episode, new dataset
            dataset = hdf5_file.create_dataset("episode_{}".format(episode),
                                               shape=(1, ),
                                               maxshape=(None, ),
                                               chunks=(1, ),
                                               compression="lzf",
                                               dtype=imitation_type)
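            # The dataset starts with a single slot and is grown one frame at a
            # time via resize(); single-frame chunks with lzf compression.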
            # execute frames
            for frame in range(0, frames_per_episode):
                measurements, sensor_data = client.read_data()
                if frame == 0:
                    initial_pose = measurements.player_measurements.transform.location
                elif not record:
                    distance = distance_3d(
                        initial_pose,
                        measurements.player_measurements.transform.location)
                    record = distance > 1.5
                else:
                    frame_index += 1

                # print_measurements(measurements)
                for name, measurement in sensor_data.items():
                    if record and args.save_images_to_disk:
                        filename = "{}_e{}_f{:02d}".format(
                            name, episode, frame_index)
                        measurement.save_to_disk(
                            os.path.join("./data", filename))

                # getting autopilot controls
                control = measurements.player_measurements.autopilot_control
                # SDL_VIDEODRIVER = offscreen
                if record:
                    label = np.ndarray(shape=(5, ), dtype=float)
                    control.steer = control.steer if np.abs(
                        control.steer) > 5e-4 else 0
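                    # Steering magnitudes below 5e-4 are clamped to zero,
                    # presumably to keep autopilot jitter out of the labels.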
                    label[0] = control.steer
                    label[1] = control.throttle
                    label[2] = control.brake
                    label[3] = 1 if control.hand_brake else 0
                    label[4] = 1 if control.reverse else 0

                    image = sensor_data['RGBFront'].data
                    image = np.transpose(image, (1, 0, 2))
                    data = np.array([(image, label)], dtype=imitation_type)
                    dataset.resize(frame_index + 2, axis=0)
                    dataset[frame_index] = data
                    total_frames += 1

                client.send_control(control)
Example #16
def create_settings(n_vehicles, n_pedestrians, quality_level,
                    image_size, use_lidar, root_dir, lidar_params, z_BEV=25):
    # Create a CarlaSettings object. This object is a wrapper around
    # the CarlaSettings.ini file. Here we set the configuration we
    # want for the new episode.
    settings = CarlaSettings()

    # Make sure that the server is running with '-benchmark' flag
    settings.set(
        SynchronousMode=True,
        SendNonPlayerAgentsInfo=True,
        NumberOfVehicles=n_vehicles,
        NumberOfPedestrians=n_pedestrians,
        # WeatherId=4,
        WeatherId=1,
        # WeatherId=random.choice([1, 3, 7, 8, 14]),
        QualityLevel=quality_level)
    settings.randomize_seeds()
    log.info("Vehicle seed: {}".format(settings.SeedVehicles))

    # TODO make lidar params pay attention to the H x W of BEV?
    settings.lidar_params = lidar_params

    # Now we want to add a couple of cameras to the player vehicle.
    # We will collect the images produced by these cameras every
    # frame.

    # The default camera captures RGB images of the scene.
    front_cam_rgb = Camera('CameraRGB')
    front_cam_position = (0., 0., 1.5)

    # angel_cam_position  = (-5.5, 0.0, 2.8)
    # angel_cam_position  = (-5.5, 0.0, 3.8)
    angel_cam_position  = (-7.5, 0.0, 4.8)    

    # For visualization.
    front_cam_rgb.FOV = 90

    # Set image resolution in pixels.
    front_cam_rgb.set_image_size(*image_size)

    # Set its position relative to the car in meters.
    # Default forward-facing
    # camera0.set_position(0.30, 0, 1.30)
    # front_cam_rgb.set_position(*front_cam_position)
    front_cam_rgb.set_position(*angel_cam_position)
    front_cam_rgb.set_rotation(roll=0., pitch=-15., yaw=0.0)
    settings.add_sensor(front_cam_rgb)

    # Bird's eye view.
    bev_cam_rgb = Camera('CameraRGBBEV')
    # Note that at FOV=90, image width across ground plane is z*2 (if camera is parallel to ground)
    # z_BEV = 50
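    # (Geometry: with FOV=90 the half-angle is 45 degrees, so the half-width on
    # the ground is z * tan(45 deg) = z, i.e. a 2*z footprint for a camera
    # pointing straight down.)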
    bev_cam_rgb.set_position(x=0, y=0, z=z_BEV)
    bev_cam_rgb.set_rotation(pitch=-90, roll=0, yaw=-90)
    bev_cam_rgb.set_image_size(400, 400)
    # bev_cam_rgb.set_image_size(200, 200)
    
    # bev_cam_rgb.set_image_size(*image_size)
    settings.add_sensor(bev_cam_rgb)

    # bev_cam_sem = Camera('CameraSemanticBEV', PostProcessing='SemanticSegmentation')
    # # Note that at FOV=90, image width across ground plane is z*2 (if camera is parallel to ground)
    # bev_cam_sem.set_position(x=0, y=0, z=z_BEV)
    # bev_cam_sem.set_rotation(pitch=-90, roll=0, yaw=0)
    # bev_cam_sem.set_image_size(*image_size)
    # settings.add_sensor(bev_cam_sem)

    # Let's add another camera producing ground-truth depth.
    front_cam_depth = Camera('CameraDepth', PostProcessing='Depth')
    front_cam_depth.set_image_size(*image_size)
    front_cam_depth.set_position(*front_cam_position)
    settings.add_sensor(front_cam_depth)

    # Semantic segmentation renders black, even in the demo code of
    # manual_control.py; might be an issue with graphics card / Unreal support?
    # See https://github.com/carla-simulator/carla/issues/151
    camera = Camera('CameraSemantic', PostProcessing='SemanticSegmentation')
    camera.set(FOV=90.0)
    camera.set_image_size(*image_size)
    camera.set_position(*front_cam_position)
    camera.set_rotation(pitch=0, yaw=0, roll=0)
    settings.add_sensor(camera)

    if use_lidar:
        settings.add_sensor(create_lidar())
    else:
        log.info("Not adding lidar")
    return settings
Example #17
    def build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':

            if self._subset == 'keep_lane':
                poses_tasks = [self._poses_town01()[0]]
                vehicles_tasks = [0]
                pedestrians_tasks = [0]

            elif self._subset == 'one_turn':
                poses_tasks = [self._poses_town01()[1]]
                vehicles_tasks = [0]
                pedestrians_tasks = [0]

            elif self._subset == 'keep_lane_one_turn':
                poses_tasks = self._poses_town01()[:2]
                vehicles_tasks = [0, 0]
                pedestrians_tasks = [0, 0]

            elif self._subset == 'no_dynamic_objects':
                poses_tasks = self._poses_town01()[:3]
                vehicles_tasks = [0, 0, 0]
                pedestrians_tasks = [0, 0, 0]

            elif self._subset is None:
                poses_tasks = self._poses_town01()
                vehicles_tasks = [0, 0, 0, 20]
                pedestrians_tasks = [0, 0, 0, 50]

            else:
                raise ValueError(
                    "experiments-subset must be keep_lane, one_turn, "
                    "keep_lane_one_turn, no_dynamic_objects or None"
                )

        else:
            raise ValueError("city must be Town01 for training")

        experiments_vector = []

        for iteration in range(len(poses_tasks)):

            for weather in self.weathers:

                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)

                experiments_vector.append(experiment)

        return experiments_vector
Example #18
def run_carla_client(args):
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 1
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    WeatherId=1,
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                ############# GETTING RIGHT CAMERA IMAGES
                camera2 = Camera('CameraRGBRight')
                # Set image resolution in pixels.
                camera2.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera2.set_position(-1.30, 1.30, 1.30)
                camera2.set_rotation(pitch=0, yaw=90, roll=0)
                settings.add_sensor(camera2)
                #############

                ############# GETTING LEFT CAMERA IMAGES
                camera2 = Camera('CameraRGBLeft')
                # Set image resolution in pixels.
                camera2.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera2.set_position(-1.30, -1.30, 1.30)
                camera2.set_rotation(pitch=0, yaw=270, roll=0)
                settings.add_sensor(camera2)
                #############

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

                if args.semantic:
                    semcamera = Camera('SemanticSegmentation',
                                       PostProcessing='SemanticSegmentation')
                    semcamera.set(FOV=90.0)
                    semcamera.set_image_size(800, 600)
                    semcamera.set_position(x=0.30, y=0, z=1.30)
                    semcamera.set_rotation(pitch=0, yaw=0, roll=0)
                    settings.add_sensor(semcamera)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            # list to hold measurements per frame for one episode
            ep_msg = []
            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                msg = print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode, name, frame)
                        measurement.save_to_disk(filename)

                # writing measurements as np array
                ep_msg.append(msg)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)

            # Write measurement data of one episode to file

            import os
            if not os.path.isdir('_out/measurements'):
                os.makedirs('_out/measurements')

            fname = "_out/measurements/" + str(episode) + ".txt"
            with open(fname, "w") as f:
                for line in ep_msg:
                    f.write(line)
                    f.write("\n")
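
Example #18 buffers the return value of `print_measurements()` for every frame and writes it to a per-episode text file, but the helper itself is not included. A plausible sketch is given below, based on the player measurement fields exposed by the CARLA 0.8 client (`transform.location`, `forward_speed`, collision and intersection counters); the exact fields and formatting in the original may differ.

def print_measurements(measurements):
    # Hypothetical version of the helper used above: format the player
    # measurements into one line, print it, and return it so the caller can
    # append it to ep_msg.
    pm = measurements.player_measurements
    message = (
        'pos=({:.1f}, {:.1f}) speed={:.0f} km/h '
        'collisions(veh={:.0f}, ped={:.0f}, other={:.0f}) '
        'otherlane={:.0f}% offroad={:.0f}% agents={}').format(
            pm.transform.location.x,
            pm.transform.location.y,
            pm.forward_speed * 3.6,  # m/s -> km/h
            pm.collision_vehicles,
            pm.collision_pedestrians,
            pm.collision_other,
            100 * pm.intersection_otherlane,
            100 * pm.intersection_offroad,
            len(measurements.non_player_agents))
    print(message)
    return message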
Example #19
0
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name == 'Town02':
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name == 'Town01_nemesis':
            poses_tasks = self._poses_town01_nemesis()
            vehicles_tasks = [0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0]
        elif self._city_name == 'Town02_nemesis':
            poses_tasks = self._poses_town02_nemesis()
            vehicles_tasks = [0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0]
        elif self._city_name == 'Town01_mini':
            poses_tasks = self._poses_town01_mini()
            vehicles_tasks = [0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0]
        else:
            raise ValueError(
                "Unknown city name: {}".format(self._city_name))

        experiments_vector = []
        """repeating experiment"""
        num_iters = 171
        weathers_iters = [1] * num_iters

        for weather in weathers_iters:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #20
0
def run_carla_client(args):

    # Settings
    # Town 1
    ## Natural turns: [42,67,69,79,94,97,70,44,85,102], [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
    ## T-intersection: [88,90,107,133,136]
    ## Side intersection left: [ 66, 16, 25 ]
    ## Side intersection right: [ 138, 19, 26 ]
    ## Straight: [14, 149, 78, 71, 89]

    # Town 2
    ## Natural turns right: [65, 78, 44, 1]
    ## Natural turns left: [49, 50, 57, 70]
    ## T-intersection [2, 5, 10, 11, 19, 26, 34, 37]
    ## Side intersection left: [7, 23, 16, 39, 6, 43, 79]
    ## Side intersection right: [40, 20, 28, 32, 34, 46, 71, 74]
    ## Straight: [61, 64, 77, 51]

    positions = args.positions or [65, 78, 49, 50, 2, 5, 10, 7, 23, 40, 20, 61]
    levels_of_randomness = args.randomness or [0.0, 0.2, 0.4, 0.6]
    frames_per_level = args.frames or [
        200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200
    ]

    print("Positions: ", positions)
    print("Levels of randomness: ", levels_of_randomness)
    print("Frames per level: ", frames_per_level)

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        directions = ["_left", "_right"
                      ] if args.force_left_and_right else ["_"]
        for direction in directions:
            print("Direction ", direction)
            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            direction_var = 6 if direction == "_right" else 0
            settings.set(SynchronousMode=True,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=0,
                         NumberOfPedestrians=0,
                         WeatherId=random.choice([2]),
                         QualityLevel=args.quality_level,
                         SeedVehicles=direction_var)
            #settings.randomize_seeds()

            if args.capture:
                # To create visualisation of the current run
                camera3 = Camera('CameraRGB')
                camera3.set_image_size(512, 256)
                camera3.set_position(-8, 0, 5)
                camera3.set(FOV=args.fov)
                camera3.set_rotation(pitch=-20, yaw=0, roll=0)
                settings.add_sensor(camera3)
                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.
                if args.point_cloud:
                    # Camera to produce point Cloud
                    camera1 = Camera('CameraDepth', PostProcessing='Depth')
                    camera1.set_image_size(256, 256)
                    camera1.set_position(2.2, 0, 1.30)
                    camera1.set(FOV=args.fov)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraSeg',
                                     PostProcessing='SemanticSegmentation')
                    camera2.set_image_size(256, 256)
                    camera2.set_position(2.2, 0, 1.30)
                    camera2.set(FOV=args.fov)
                    settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.

            for episode, position in enumerate(positions):
                for randomness, frames in zip(levels_of_randomness,
                                              frames_per_level):
                    crashed = True
                    while crashed:
                        crashed = False
                        # Start a new episode.
                        print('Starting new episode at %r...' % scene.map_name)
                        print('Episode {}, Position {}, Randomness {}'.format(
                            episode, position, randomness))
                        client.start_episode(position)

                        if args.capture:
                            # Make sure directories exist
                            directory = '_out/pos{}{}/randomness_{}'.format(
                                position, direction, randomness)
                            ply_dir = '{}/point_clouds'.format(directory)
                            ply_dir_full = '{}/point_clouds_full'.format(
                                directory)
                            lidar_dir = '{}/lidar'.format(directory)
                            img_dir = '{}/images'.format(directory)
                            if not os.path.exists(img_dir):
                                os.makedirs(img_dir)
                            if args.point_cloud and not os.path.exists(
                                    ply_dir):
                                os.makedirs(ply_dir)
                            if args.point_cloud and not os.path.exists(
                                    ply_dir_full):
                                os.makedirs(ply_dir_full)
                            if args.lidar and not os.path.exists(lidar_dir):
                                os.makedirs(lidar_dir)
                            # Write steering data to csv file. The file is
                            # only created for point-cloud or lidar captures.
                            csv = None
                            if args.point_cloud:
                                csv = open(
                                    "{}/driving_data.csv".format(ply_dir), "w")
                            elif args.lidar:
                                csv = open(
                                    "{}/driving_data.csv".format(lidar_dir),
                                    "w")
                            if csv is not None:
                                csv.write(",name,speed,throttle,steer\n")

                        # Iterate every frame in the episode
                        for frame in tqdm(range(frames)):
                            # Read the data produced by the server this frame.
                            measurements, sensor_data = client.read_data()

                            if args.capture:
                                if args.point_cloud:
                                    # Save processed point clouds and autopilot steering to disk if requested
                                    # Get depth and seg as numpy array for further processing
                                    depth_obj = sensor_data['CameraDepth']
                                    depth = depth_obj.data
                                    fov = depth_obj.fov

                                    # Filter seg to get intersection points
                                    seg = sensor_data['CameraSeg'].data
                                    filtered_seg = pp.filter_seg_image(seg)

                                    if args.full_point_cloud:
                                        # Convert the full (unfiltered) depth image to a point cloud
                                        point_cloud_full = pp.depth_to_local_point_cloud(
                                            depth,
                                            fov,
                                            seg,
                                            max_depth=0.05,
                                            full=True)
                                        filename_full = "point_cloud_full_{:0>5d}".format(
                                            frame)
                                        pp.save_to_disk(
                                            point_cloud_full,
                                            "{}/{}.ply".format(
                                                ply_dir_full, filename_full))

                                    # Create pointcloud from seg and depth (but only take intersection points)
                                    point_cloud = pp.depth_to_local_point_cloud(
                                        depth,
                                        fov,
                                        filtered_seg,
                                        max_depth=0.05)
                                    filename = "point_cloud_{:0>5d}".format(
                                        frame)
                                    pp.save_to_disk(
                                        point_cloud,
                                        "{}/{}.ply".format(ply_dir, filename))

                                if args.lidar:
                                    sensor_data['Lidar32'].save_to_disk(
                                        '{}/point_cloud_{:0>5d}'.format(
                                            lidar_dir, frame))

                                # Save steering data of this frame
                                control = measurements.player_measurements.autopilot_control
                                if csv is not None:
                                    csv.write("0,image_{:0>5d},0,0,{}\n".format(
                                        frame, control.steer))

                                # Save rgb image to visualize later
                                sensor_data['CameraRGB'].save_to_disk(
                                    '{}/image_{:0>5d}.png'.format(
                                        img_dir, frame))

                            # Now we have to send the instructions to control the vehicle.
                            # If we are in synchronous mode the server will pause the
                            # simulation until we send this control.

                            control = measurements.player_measurements.autopilot_control
                            speed = measurements.player_measurements.forward_speed
                            old_steer = control.steer
                            control.steer += random.uniform(
                                -randomness, randomness)

                            if args.ignore_red_lights:
                                control.throttle = 0.5
                                control.brake = 0.0
                                control.hand_brake = False
                                control.reverse = False

                            client.send_control(control)
                            crashed = False
                            if speed < 1 and abs(old_steer) > 0.5:
                                print(
                                    "\nSeems like we crashed.\nTrying again..."
                                )
                                crashed = True
                                break
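
The capture loop above writes one `driving_data.csv` per position/randomness directory, with the header `,name,speed,throttle,steer` and one row per saved frame. As a follow-up, here is a short sketch of how those logs could be collected for training; pandas is an assumption (the original snippet does not use it), and the glob pattern simply mirrors the directory layout created above.

import glob

import pandas as pd


def load_steering_logs(root='_out'):
    # Gather every driving_data.csv written by the capture loop above and
    # concatenate them into one DataFrame with columns name, speed, throttle,
    # steer (the leading unnamed column becomes the index).
    frames = []
    pattern = '{}/pos*/randomness_*/*/driving_data.csv'.format(root)
    for path in glob.glob(pattern):
        df = pd.read_csv(path, index_col=0)
        df['source'] = path
        frames.append(df)
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()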
Example #21
0
    def build_experiments(self):
        """
            Creates the whole set of experiment objects,
            The experiments created depends on the selected Town.

        """

        # We check the town, based on that we define the town related parameters
        # The size of the vector is related to the number of tasks, inside each
        # task there is also multiple poses ( start end, positions )
        if self._city_name == 'Town01':
            #poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            poses_tasks = [[[138, 134]]]
            vehicles_tasks = [20]
            pedestrians_tasks = [10]
        else:
            right_curves = [[[1, 56], [65, 69], [78, 51], [44, 61], [40, 17],
                             [71, 16], [74, 38], [46, 12], [19, 18], [26, 74],
                             [37, 76], [11, 44], [20, 6], [10, 22], [28, 2],
                             [5, 15], [14, 33], [34, 8]]]
            left_curves = [[[57, 82], [72, 43], [52, 79], [70, 66], [43, 14],
                            [11, 47], [79, 32], [37, 75], [75, 16], [26, 73],
                            [39, 5], [2, 37], [34, 13], [6, 35], [10, 19],
                            [23, 6], [5, 30], [16, 2]]]
            special_test = [[[19, 66], [79, 14], [42, 13], [31, 71], [54, 30],
                             [10, 61], [66, 3], [27, 12], [2, 29], [16, 14],
                             [70, 73], [46, 67], [51, 81], [56, 65], [43, 54]]]
            corl_task2 = [[[19, 66], [79, 14], [19, 57], [23, 1], [53, 76],
                           [42, 13], [31, 71], [33, 5], [54, 30], [10, 61],
                           [66, 3], [27, 12], [79, 19], [2, 29], [16, 14],
                           [5, 57], [70, 73], [46, 67], [57, 50], [61, 49],
                           [21, 12], [51, 81], [77, 68], [56, 65], [43, 54]]]

            #poses_tasks = corl_task2
            poses_tasks = [[[10, 19]]]
            vehicles_tasks = [0]  #*len(poses_tasks[0])
            pedestrians_tasks = [0]  #*len(poses_tasks[0])

        # We set the camera
        # This single RGB camera is used on every experiment
        '''camera = Camera('CameraRGB')
        camera.set(FOV=90)
        camera.set_image_size(800, 600)
        camera.set_position(1.44, 0.0, 1.2)
        camera.set_rotation(0, 0, 0)'''

        camera0 = Camera('CameraRGB0')
        camera0.set(FOV=90)
        camera0.set_image_size(400, 300)
        camera0.set_position(1.7, 0.0, 1.3)
        camera0.set_rotation(0, 0, 0)

        camera1 = Camera('CameraRGB1')
        camera1.set(FOV=90)
        camera1.set_image_size(400, 300)
        camera1.set_position(1.7, 0.0, 1.3)
        camera1.set_rotation(0, -45, 0)

        camera2 = Camera('CameraRGB2')
        camera2.set(FOV=90)
        camera2.set_image_size(400, 300)
        camera2.set_position(1.7, 0.0, 1.3)
        camera2.set_rotation(0, +45, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set for this experiment
                conditions.add_sensor(camera0)
                conditions.add_sensor(camera1)
                conditions.add_sensor(camera2)
                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)
            '''path0=os.listdir('CameraRGB0')
            path1=os.listdir('CameraRGB1')
            path2=os.listdir('CameraRGB2')
            os.mkdir('CameraRGB')

            width=400
            height=300

            total_width = width*3
            max_height = height

            i = 0
            images=[]
            images.append(Image.open('CameraRGB1/'+path1[i]))
            images.append(Image.open('CameraRGB0/'+path0[i]))
            images.append(Image.open('CameraRGB2/'+path2[i]))
            new_im = Image.new('RGB', (total_width, max_height))
            x_offset = 0
            for im in images:
                new_im.paste(im, (x_offset,0))
                x_offset += im.size[0]
                new_im.save('CameraRGB/'+path0[i])
            i = i+1'''

        return experiments_vector
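
The triple-quoted block at the end of the method above sketches stitching the left, center and right camera views into one wide image, but as written it only handles a single file and saves inside the paste loop. Below is a corrected sketch of the same idea; the directory names and the 400x300 image size come from the snippet, while the function itself is a hypothetical stand-alone helper.

import os

from PIL import Image


def stitch_camera_views(out_dir='CameraRGB', width=400, height=300):
    # Paste the left, center and right views (CameraRGB1, CameraRGB0,
    # CameraRGB2) side by side into a single 1200x300 image per frame.
    # Assumes the three directories contain identically named files.
    os.makedirs(out_dir, exist_ok=True)
    for name in sorted(os.listdir('CameraRGB0')):
        views = [Image.open(os.path.join(d, name))
                 for d in ('CameraRGB1', 'CameraRGB0', 'CameraRGB2')]
        stitched = Image.new('RGB', (width * 3, height))
        x_offset = 0
        for view in views:
            stitched.paste(view, (x_offset, 0))
            x_offset += view.size[0]
        stitched.save(os.path.join(out_dir, name))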
Example #22
0
    def build_experiments(self):
        """
        Creates the whole set of experiment objects,
        The experiments created depend on the selected Town.


        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraMiddle')
        camera.set(FOV=90)
        camera.set_image_size(768, 576)
        camera.set_position(1.5, 0.0, 1.6)
        camera.set_rotation(0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 100]
            pedestrians_tasks = [0, 0, 0, 300]
            n_samples = [0, 0, 30 // 4, 60 // 4]
            #n_samples = [3, 6, 6, 9]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 50]
            pedestrians_tasks = [0, 0, 0, 150]
            n_samples = [0, 0, 15, len(poses_tasks[-1])]
            #n_samples = [3, 6, 6, 9]

        experiments_vector = []

        random.seed(1)

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                nsample = n_samples[iteration]
                if nsample == 0:
                    continue
                poses = random.sample(poses, nsample)

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather,
                               QualityLevel="Low")
                # Add all the cameras that were set for this experiment

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Example #23
0
def build_eccv_navigation_dynamic():
    def _poses_town01():
        """
        Each matrix is a new task. We have all the four tasks

        """

        def _poses_navigation():
            return [[36, 40], [39, 35]]
            #return [[105, 29], [27, 130], [102, 87], [132, 27], [24, 44],
            #        [96, 26], [34, 67], [28, 1], [140, 134], [105, 9],
            #        [148, 129], [65, 18], [21, 16], [147, 97], [42, 51],
            #        [30, 41], [18, 107], [69, 45], [102, 95], [18, 145],
            #        [111, 64], [79, 45], [84, 69], [73, 31], [37, 81]]

        return [_poses_navigation()]

    def _poses_town02():

        def _poses_navigation():
            return [[38, 34], [4, 2]]
            #return [[19, 66], [79, 14], [19, 57], [23, 1],
            #        [53, 76], [42, 13], [31, 71], [33, 5],
            #        [54, 30], [10, 61], [66, 3], [27, 12],
            #        [79, 19], [2, 29], [16, 14], [5, 57],
            #        [70, 73], [46, 67], [57, 50], [61, 49], [21, 12],
            #        [51, 81], [77, 68], [56, 65], [43, 54]]

        return [_poses_navigation()]

    # We check the town, based on that we define the town related parameters
    # The size of the vector is related to the number of tasks, inside each
    # task there is also multiple poses ( start end, positions )

    exp_set_dict = {
        'Name': 'eccv_navigation_dynamic',
        'Town01': {'poses': _poses_town01(),
                   'vehicles': [20],
                   'pedestrians': [50],
                   'weathers_train': [1],
                   'weathers_validation': []

                   },
        'Town02': {'poses': _poses_town02(),
                   'vehicles': [15],
                   'pedestrians': [50],
                   'weathers_train': [],
                   'weathers_validation': [14]

                   }
    }

    # We set the camera
    # This single RGB camera is used on every experiment
    camera = Camera('rgb')
    camera.set(FOV=100)
    camera.set_image_size(800, 600)
    camera.set_position(2.0, 0.0, 1.4)
    camera.set_rotation(-15.0, 0, 0)
    sensor_set = [camera]

    return _build_experiments(exp_set_dict, sensor_set), exp_set_dict
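
`build_eccv_navigation_dynamic()` hands `exp_set_dict` and `sensor_set` to a `_build_experiments()` helper that is not included in this snippet. Judging from the dictionary layout and the `Experiment`/`CarlaSettings` pattern used throughout these examples, it plausibly looks something like the sketch below; the body, the combined train/validation weather handling, and the import paths are assumptions, not the original implementation.

from carla.settings import CarlaSettings
# Import path as used by the CARLA 0.8 driving benchmark; adjust if it differs.
from carla.driving_benchmark.experiment import Experiment


def _build_experiments(exp_set_dict, sensor_set):
    # Hypothetical reconstruction: turn the per-town parameter dictionaries
    # into the same kind of experiments_vector that the build_experiments()
    # methods above return.
    experiments_vector = []
    for town in ('Town01', 'Town02'):
        params = exp_set_dict[town]
        weathers = params['weathers_train'] + params['weathers_validation']
        for weather in weathers:
            for task, poses in enumerate(params['poses']):
                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=params['vehicles'][task],
                               NumberOfPedestrians=params['pedestrians'][task],
                               WeatherId=weather)
                for sensor in sensor_set:
                    conditions.add_sensor(sensor)
                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=task,
                               Repetitions=1)
                experiments_vector.append(experiment)
    return experiments_vector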
Example #24
0
def run_carla_client(args):
    skip_frames = 100  # at 10 fps
    number_of_episodes = args.n_episode
    frames_per_episode = args.n_frame

    # n_weather = 14 # weathers starts from 1
    # n_player_start_spots = 152 # Town01
    n_player_start_spots = 83  # Town02

    weathers = number_of_episodes * [2]  # CloudyNoon
    start_spots = list(range(n_player_start_spots))
    random.shuffle(start_spots)
    assert number_of_episodes < n_player_start_spots
    start_spots = start_spots[:number_of_episodes]

    # weathers = list(range(number_of_episodes))
    # # random.shuffle(weathers)
    # weathers = [w % 14 + 1 for w in weathers]
    # https://carla.readthedocs.io/en/latest/carla_settings/

    # number_of_episodes = n_weather*n_player_start_spots
    # frames_per_episode = args.n_frame

    # weathers, start_spots = np.meshgrid(list(range(1, n_weather+1)), list(range(n_player_start_spots)))

    # weathers = weathers.flatten()
    # start_spots = start_spots.flatten()

    if not os.path.isdir(args.out_dir):
        os.makedirs(args.out_dir)
    np.savetxt(join(args.out_dir, 'weathers.txt'), weathers, fmt='%d')
    np.savetxt(join(args.out_dir, 'start-spots.txt'), start_spots, fmt='%d')
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=weathers[episode],
                    # WeatherId=random.randrange(14) + 1,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('RGB')
                # Set image resolution in pixels.
                camera0.set(FOV=args.cam_fov)
                camera0.set_image_size(args.x_res, args.y_res)
                # Set its position relative to the car in meters.
                camera0.set_position(*args.cam_offset)
                camera0.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('Depth', PostProcessing='Depth')
                camera1.set(FOV=args.cam_fov)
                camera1.set_image_size(args.x_res, args.y_res)
                camera1.set_position(*args.cam_offset)
                camera1.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera1)

                camera2 = Camera('SegRaw',
                                 PostProcessing='SemanticSegmentation')
                camera2.set(FOV=args.cam_fov)
                camera2.set_image_size(args.x_res, args.y_res)
                camera2.set_position(*args.cam_offset)
                camera2.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randrange(number_of_player_starts)

            player_start = start_spots[episode]

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            frame = 0
            save_frame_idx = 0

            # extra_frames = 0
            # last_control = None
            # last_control_changed = 0
            # while frame < skip_frames + frames_per_episode + extra_frames:

            # Iterate every frame in the episode.
            for frame in range(skip_frames + frames_per_episode):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk and frame >= skip_frames \
                    and (frame - skip_frames) % args.save_every_n_frames == 0:
                    # if last_control_changed < frame - args.save_every_n_frames:
                    #     extra_frames += args.save_every_n_frames
                    #     last_control_changed += args.save_every_n_frames
                    # else:
                    save_frame_idx += 1
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode + 1, name, save_frame_idx)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['Depth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # if last_control:
                    #     for v1, v2 in zip(control.values(), last_control.values()):
                    #         if v1 != v2:
                    #             last_control_changed = frame
                    #             break

                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
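
The comment inside the frame loop above shows how to read the normalized depth at a pixel via the image's `data` property (the depth sensor in this example is named 'Depth'). To convert that value to meters, the CARLA documentation scales by the 1000 m far plane of the depth camera; below is a small sketch under that assumption.

def depth_in_meters(sensor_data, x, y, far_plane=1000.0):
    # Normalized depth in [0, 1] from the 'Depth' camera, scaled to meters.
    # far_plane=1000.0 assumes the default far plane documented for the
    # CARLA depth camera.
    depth_array = sensor_data['Depth'].data
    return float(depth_array[y, x]) * far_plane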
Example #25
0
    def build_experiments(self):
        """
            Creates the whole set of experiment objects,
            The experiments created depends on the selected Town.

        """

        # We check the town, based on that we define the town related parameters
        # The size of the vector is related to the number of tasks, inside each
        # task there is also multiple poses ( start end, positions )
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            right_curves = [[[1, 56], [65, 69], [78, 51], [44, 61], [40, 17],
                             [71, 16], [74, 38], [46, 12], [19, 18], [26, 74],
                             [37, 76], [11, 44], [20, 6], [10, 22], [28, 2],
                             [5, 15], [14, 33], [34, 8]]]
            left_curves = [[[57, 82], [72, 43], [52, 79], [70, 66], [43, 14],
                            [11, 47], [79, 32], [37, 75], [75, 16], [26, 73],
                            [39, 5], [2, 37], [34, 13], [6, 35], [10, 19],
                            [23, 6], [5, 30], [16, 2]]]
            special_test = [[[19, 66], [79, 14], [42, 13], [31, 71], [54, 30],
                             [10, 61], [66, 3], [27, 12], [2, 29], [16, 14],
                             [70, 73], [46, 67], [51, 81], [56, 65], [43, 54]]]
            corl_task2 = [[[19, 66], [79, 14], [19, 57], [23, 1], [53, 76],
                           [42, 13], [31, 71], [33, 5], [54, 30], [10, 61],
                           [66, 3], [27, 12], [79, 19], [2, 29], [16, 14],
                           [5, 57], [70, 73], [46, 67], [57, 50], [61, 49],
                           [21, 12], [51, 81], [77, 68], [56, 65], [43, 54]]]

            poses_tasks = corl_task2
            vehicles_tasks = [0]*len(poses_tasks[0])
            pedestrians_tasks = [0]*len(poses_tasks[0])

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=90)
        camera.set_image_size(800, 600)
        camera.set_position(1.44, 0.0, 1.2)
        camera.set_rotation(0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather

                )
                # Add all the cameras that were set for this experiment
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector