Example no. 1
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point required in
        # order to consider the episode a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        data = OrderedDict()
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Returns the pose and experiment from which to resume this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')
        cols = [
            'Start_position', 'End_position', 'Total-Distance', 'Time',
            "Distance-Travelled", "Follow_lane", "Straight", "Left", "Right",
            "Avg-Speed", "Collision-Pedestrian", "Collision-Vehicle",
            "Collision-Other", "Intersection-Lane", "Intersection-Offroad",
            "Traffic_Light_Infraction", "Success"
        ]
        df = pd.DataFrame(columns=cols)
        ts = str(int(time.time()))

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                data['Start_position'] = pose[0]
                data['End_position'] = pose[1]

                #print(df)

                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode about to run
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])
                    data["Total-Distance"] = initial_distance
                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, data) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index) , data)
                    data["Time"] = final_time
                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    data['Success'] = result
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)

                    else:
                        logging.info('----- Timeout! -----')
                #data['Start_position'] = start_index
                #data['End_position'] = end_index
                #df = df.append(data , ignore_index=True)
                #print(df)
                data["Avg-Speed"] = 3.6 * (data['Total-Distance'] /
                                           data['Time'])
                df = df.append(data, ignore_index=True)
                df = df[cols]
                try:
                    df.to_csv("Test_result_" + ts + '.csv',
                              columns=cols,
                              index=True)
                except Exception:
                    print("CSV file is currently in use; it will be saved later")
            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level directions (planner command) to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, data):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        direction2text = {
            0: 'Follow_lane',
            1: 'Follow_lane',
            2: 'Follow_lane',
            3: 'Left',
            4: 'Right',
            5: 'Straight'
        }
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False
        # Initialize lane and Offroad fields
        data["Intersection-Lane"] = 0
        data["Intersection-Offroad"] = 0
        data["Directions"] = []
        agent.command_follower.controller.params[
            'target_speed'] = agent.command_follower.controller.params[
                'default_speed_limit']
        agent.traffic_light_infraction = False
        for x in range(6):
            data[direction2text[x]] = 0
        data["Collision-Pedestrian"] = 0
        data["Collision-Vehicle"] = 0
        data["Collision-Other"] = 0
        while True:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # Agent process the data.
            if data[direction2text[directions]] == 0:
                data[direction2text[directions]] = 1
            control, controller_state = agent.run_step(measurements,
                                                       sensor_data, directions,
                                                       target)
            # Send the control commands to the vehicle
            client.send_control(control)
            if measurements.player_measurements.intersection_otherlane > data[
                    "Intersection-Lane"]:
                data[
                    "Intersection-Lane"] = measurements.player_measurements.intersection_otherlane
            if measurements.player_measurements.intersection_offroad > data[
                    "Intersection-Offroad"]:
                data[
                    "Intersection-Offroad"] = measurements.player_measurements.intersection_offroad
            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y
            # If the vehicle stopped for a pedestrian, a vehicle or a traffic light,
            # extend the timeout by the time elapsed in this frame
            if min(controller_state['stop_pedestrian'], controller_state['stop_vehicle'],\
                controller_state['stop_traffic_lights']) <= 0.3 :
                time_out += (measurements.game_timestamp -
                             current_timestamp) / 1000
            #logging.info("Controller sis Inputting:")
            #logging.info('Steer = %f Throttle = %f Brake = %f ',
            #             control.steer, control.throttle, control.brake)
            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            #logging.info('Status:')
            print(
                f"Distance to target: {float(distance):.2f} \t Time_taken: {(current_timestamp -initial_timestamp)/1000 :.2f}\t Timeout : {time_out :.2f}"
            )
            # Check whether the target has been reached
            data["Distance-Travelled"] = data["Total-Distance"] - distance
            if distance < self._distance_for_success:
                success = True
                break
            if (current_timestamp - initial_timestamp) / 1000 > time_out:
                data["Intersection-Offroad"] = 0
                data["Intersection-Lane"] = 0
                data["Collision-Pedestrian"] = 0
                data["Collision-Vehicle"] = 0
                data["Collision-Other"] = 0
                success = False
                print("Timeout")
                break

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

            #Stop in case of collision or excessive lane infraction
            if measurements.player_measurements.collision_pedestrians >300 or \
                measurements.player_measurements.collision_vehicles > 300 or \
                measurements.player_measurements.collision_other>300:
                if measurements.player_measurements.collision_pedestrians > 300:
                    data["Collision-Pedestrian"] = 1
                elif measurements.player_measurements.collision_vehicles > 300:
                    data["Collision-Vehicle"] = 1
                elif measurements.player_measurements.collision_other > 300:
                    data["Collision-Other"] = 1
                data["Intersection-Offroad"] = 0
                data["Intersection-Lane"] = 0
                print("Collison detected")
                success = False
                break

            if data["Intersection-Lane"] > 0.5:
                data["Intersection-Lane"] = 1
                data["Intersection-Offroad"] = 0
                data["Collision-Pedestrian"] = 0
                data["Collision-Vehicle"] = 0
                data["Collision-Other"] = 0
                print("Lane Infraction")
                success = False
                break
            elif data["Intersection-Offroad"] > 0.5:
                data["Intersection-Offroad"] = 1
                data["Intersection-Lane"] = 0
                data["Collision-Pedestrian"] = 0
                data["Collision-Vehicle"] = 0
                data["Collision-Other"] = 0
                print("Lane Infraction")
                success = False
                break
        if success:
            data["Intersection-Offroad"] = 0
            data["Intersection-Lane"] = 0
            data["Collision-Pedestrian"] = 0
            data["Collision-Vehicle"] = 0
            data["Collision-Other"] = 0
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance, data
        return 0, measurement_vec, control_vec, time_out, distance, data
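The listing above calls sldist(...) without defining it. Below is a minimal sketch consistent with how it is used here (straight-line distance between two [x, y] points); the name and behaviour are assumptions inferred from that usage:

import math

def sldist(c1, c2):
    # Straight-line (Euclidean) distance between two [x, y] points.
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)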
Example no. 2
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point required in
        # order to consider the episode a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(dir_to_save=city_name,
                                    name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        self._episode_number = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Returns the pose and experiment from which to resume this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    self._episode_number += 1
                    # Print information on the episode about to run
                    logging.info('======== !!!! ==========')
                    logging.info('Episode Number: %d', self._episode_number)
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level directions (planner command) to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name):
        """
        Runs one episode of the benchmark (one pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        while (current_timestamp - initial_timestamp) < (time_out *
                                                         1000) and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # Agent process the data.
            # control = agent.run_step(measurements, sensor_data, directions, target)
            control = agent.run_step(measurements, sensor_data, directions,
                                     target)
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                         float(distance), current_x, current_y,
                         target.location.x, target.location.y)
            # Check whether the target has been reached
            if distance < self._distance_for_success:
                success = True

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance
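A minimal usage sketch for the DrivingBenchmark above, assuming the CARLA 0.8 Python client package layout (make_carla_client, the bundled ForwardAgent and BasicExperimentSuite); the host, port and town name are placeholders:

from carla.agent import ForwardAgent
from carla.client import make_carla_client
from carla.driving_benchmark.experiment_suites import BasicExperimentSuite

if __name__ == '__main__':
    # Any agent exposing run_step(measurements, sensor_data, directions, target) would do here.
    agent = ForwardAgent()
    experiment_suite = BasicExperimentSuite('Town01')
    with make_carla_client('localhost', 2000) as client:
        benchmark = DrivingBenchmark(city_name='Town01',
                                     name_to_save='Test',
                                     continue_experiment=True)
        results = benchmark.benchmark_agent(experiment_suite, agent, client)
        print('Benchmark log written to', benchmark.get_path())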
Example no. 3
0
class CarlaEnvironment(Environment):
    def __init__(self, level: LevelSelection, seed: int, frame_skip: int,
                 human_control: bool, custom_reward_threshold: Union[int,
                                                                     float],
                 visualization_parameters: VisualizationParameters,
                 server_height: int, server_width: int, camera_height: int,
                 camera_width: int, verbose: bool,
                 experiment_suite: ExperimentSuite, config: str,
                 episode_max_time: int, allow_braking: bool,
                 quality: CarlaEnvironmentParameters.Quality,
                 cameras: List[CameraTypes], weather_id: List[int],
                 experiment_path: str,
                 separate_actions_for_throttle_and_brake: bool,
                 num_speedup_steps: int, max_speed: float, **kwargs):
        super().__init__(level, seed, frame_skip, human_control,
                         custom_reward_threshold, visualization_parameters)

        # server configuration
        self.server_height = server_height
        self.server_width = server_width
        self.port = get_open_port()
        self.host = 'localhost'
        self.map_name = CarlaLevel[level.upper()].value['map_name']
        self.map_path = CarlaLevel[level.upper()].value['map_path']
        self.experiment_path = experiment_path

        # client configuration
        self.verbose = verbose
        self.quality = quality
        self.cameras = cameras
        self.weather_id = weather_id
        self.episode_max_time = episode_max_time
        self.allow_braking = allow_braking
        self.separate_actions_for_throttle_and_brake = separate_actions_for_throttle_and_brake
        self.camera_width = camera_width
        self.camera_height = camera_height

        # setup server settings
        self.experiment_suite = experiment_suite
        self.config = config
        if self.config:
            # load settings from file
            with open(self.config, 'r') as fp:
                self.settings = fp.read()
        else:
            # hard coded settings
            self.settings = CarlaSettings()
            self.settings.set(SynchronousMode=True,
                              SendNonPlayerAgentsInfo=False,
                              NumberOfVehicles=15,
                              NumberOfPedestrians=30,
                              WeatherId=random.choice(
                                  force_list(self.weather_id)),
                              QualityLevel=self.quality.value,
                              SeedVehicles=seed,
                              SeedPedestrians=seed)
            if seed is None:
                self.settings.randomize_seeds()

            self.settings = self._add_cameras(self.settings, self.cameras,
                                              self.camera_width,
                                              self.camera_height)

        # open the server
        self.server = self._open_server()

        logging.disable(40)

        # open the client
        self.game = CarlaClient(self.host, self.port, timeout=99999999)
        self.game.connect()
        if self.experiment_suite:
            self.current_experiment_idx = 0
            self.current_experiment = self.experiment_suite.get_experiments()[
                self.current_experiment_idx]
            self.scene = self.game.load_settings(
                self.current_experiment.conditions)
        else:
            self.scene = self.game.load_settings(self.settings)

        # get available start positions
        self.positions = self.scene.player_start_spots
        self.num_positions = len(self.positions)
        self.current_start_position_idx = 0
        self.current_pose = 0

        # state space
        self.state_space = StateSpace({
            "measurements":
            VectorObservationSpace(
                4, measurements_names=["forward_speed", "x", "y", "z"])
        })
        for camera in self.scene.sensors:
            self.state_space[camera.name] = ImageObservationSpace(
                shape=np.array([self.camera_height, self.camera_width, 3]),
                high=255)

        # action space
        if self.separate_actions_for_throttle_and_brake:
            self.action_space = BoxActionSpace(
                shape=3,
                low=np.array([-1, 0, 0]),
                high=np.array([1, 1, 1]),
                descriptions=["steer", "gas", "brake"])
        else:
            self.action_space = BoxActionSpace(
                shape=2,
                low=np.array([-1, -1]),
                high=np.array([1, 1]),
                descriptions=["steer", "gas_and_brake"])

        # human control
        if self.human_control:
            # convert continuous action space to discrete
            self.steering_strength = 0.5
            self.gas_strength = 1.0
            self.brake_strength = 0.5
            # TODO: reverse order of actions
            self.action_space = PartialDiscreteActionSpaceMap(
                target_actions=[[0., 0.], [0., -self.steering_strength],
                                [0., self.steering_strength],
                                [self.gas_strength, 0.],
                                [-self.brake_strength, 0],
                                [self.gas_strength, -self.steering_strength],
                                [self.gas_strength, self.steering_strength],
                                [self.brake_strength, -self.steering_strength],
                                [self.brake_strength, self.steering_strength]],
                descriptions=[
                    'NO-OP', 'TURN_LEFT', 'TURN_RIGHT', 'GAS', 'BRAKE',
                    'GAS_AND_TURN_LEFT', 'GAS_AND_TURN_RIGHT',
                    'BRAKE_AND_TURN_LEFT', 'BRAKE_AND_TURN_RIGHT'
                ])

            # map keyboard keys to actions
            for idx, action in enumerate(self.action_space.descriptions):
                for key in key_map.keys():
                    if action == key:
                        self.key_to_action[key_map[key]] = idx

        self.num_speedup_steps = num_speedup_steps
        self.max_speed = max_speed

        # measurements
        self.autopilot = None
        self.planner = Planner(self.map_name)

        # env initialization
        self.reset_internal_state(True)

        # render
        if self.is_rendered:
            image = self.get_rendered_image()
            self.renderer.create_screen(image.shape[1], image.shape[0])

    def _add_cameras(self, settings, cameras, camera_width, camera_height):
        # add a front facing camera
        if CameraTypes.FRONT in cameras:
            camera = Camera(CameraTypes.FRONT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 0, 0)
            settings.add_sensor(camera)

        # add a left facing camera
        if CameraTypes.LEFT in cameras:
            camera = Camera(CameraTypes.LEFT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, -30, 0)
            settings.add_sensor(camera)

        # add a right facing camera
        if CameraTypes.RIGHT in cameras:
            camera = Camera(CameraTypes.RIGHT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 30, 0)
            settings.add_sensor(camera)

        # add a front facing depth camera
        if CameraTypes.DEPTH in cameras:
            camera = Camera(CameraTypes.DEPTH.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'Depth'
            settings.add_sensor(camera)

        # add a front facing semantic segmentation camera
        if CameraTypes.SEGMENTATION in cameras:
            camera = Camera(CameraTypes.SEGMENTATION.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'SemanticSegmentation'
            settings.add_sensor(camera)

        return settings

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level directions (planner command) to reach a certain goal
        """

        directions = self.planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _open_server(self):
        log_path = path.join(
            self.experiment_path if self.experiment_path is not None else '.',
            'logs', "CARLA_LOG_{}.txt".format(self.port))
        if not os.path.exists(os.path.dirname(log_path)):
            os.makedirs(os.path.dirname(log_path))
        with open(log_path, "wb") as out:
            cmd = [
                path.join(environ.get('CARLA_ROOT'),
                          'CarlaUE4.sh'), self.map_path, "-benchmark",
                "-carla-server", "-fps={}".format(30 / self.frame_skip),
                "-world-port={}".format(self.port),
                "-windowed -ResX={} -ResY={}".format(self.server_width,
                                                     self.server_height),
                "-carla-no-hud"
            ]

            if self.config:
                cmd.append("-carla-settings={}".format(self.config))
            p = subprocess.Popen(cmd, stdout=out, stderr=out)

        return p

    def _close_server(self):
        os.killpg(os.getpgid(self.server.pid), signal.SIGKILL)

    def _update_state(self):
        # get measurements and observations
        measurements = []
        while type(measurements) == list:
            measurements, sensor_data = self.game.read_data()
        self.state = {}

        for camera in self.scene.sensors:
            self.state[camera.name] = sensor_data[camera.name].data

        self.location = [
            measurements.player_measurements.transform.location.x,
            measurements.player_measurements.transform.location.y,
            measurements.player_measurements.transform.location.z
        ]

        is_collision = measurements.player_measurements.collision_vehicles != 0 \
                       or measurements.player_measurements.collision_pedestrians != 0 \
                       or measurements.player_measurements.collision_other != 0

        speed_reward = measurements.player_measurements.forward_speed - 1
        if speed_reward > 30.:
            speed_reward = 30.
        self.reward = speed_reward \
                      - (measurements.player_measurements.intersection_otherlane * 5) \
                      - (measurements.player_measurements.intersection_offroad * 5) \
                      - is_collision * 100 \
                      - np.abs(self.control.steer) * 10

        # update measurements
        self.measurements = [measurements.player_measurements.forward_speed
                             ] + self.location
        self.autopilot = measurements.player_measurements.autopilot_control

        # The directions to reach the goal (0 Follow lane, 1 Left, 2 Right, 3 Straight)
        directions = int(
            self._get_directions(measurements.player_measurements.transform,
                                 self.current_goal) - 2)
        self.state['high_level_command'] = directions

        if (measurements.game_timestamp >=
                self.episode_max_time) or is_collision:
            # screen.success('EPISODE IS DONE. GameTime: {}, Collision: {}'.format(str(measurements.game_timestamp),
            #                                                                      str(is_collision)))
            self.done = True

        self.state['measurements'] = np.array(self.measurements)

    def _take_action(self, action):
        self.control = VehicleControl()

        if self.separate_actions_for_throttle_and_brake:
            self.control.steer = np.clip(action[0], -1, 1)
            self.control.throttle = np.clip(action[1], 0, 1)
            self.control.brake = np.clip(action[2], 0, 1)
        else:
            # transform the 2 value action (steer, throttle - brake) into a 3 value action (steer, throttle, brake)
            self.control.steer = np.clip(action[0], -1, 1)
            self.control.throttle = np.clip(action[1], 0, 1)
            self.control.brake = np.abs(np.clip(action[1], -1, 0))

        # prevent braking
        if not self.allow_braking or self.control.brake < 0.1 or self.control.throttle > self.control.brake:
            self.control.brake = 0

        # prevent over speeding
        if hasattr(self, 'measurements') and self.measurements[
                0] * 3.6 > self.max_speed and self.control.brake == 0:
            self.control.throttle = 0.0

        self.control.hand_brake = False
        self.control.reverse = False

        self.game.send_control(self.control)

    def _load_experiment(self, experiment_idx):
        self.current_experiment = self.experiment_suite.get_experiments(
        )[experiment_idx]
        self.scene = self.game.load_settings(
            self.current_experiment.conditions)
        self.positions = self.scene.player_start_spots
        self.num_positions = len(self.positions)
        self.current_start_position_idx = 0
        self.current_pose = 0

    def _restart_environment_episode(self, force_environment_reset=False):
        # select start and end positions
        if self.experiment_suite:
            # if an experiment suite is available, follow its given poses
            if self.current_pose >= len(self.current_experiment.poses):
                # load a new experiment
                self.current_experiment_idx = (
                    self.current_experiment_idx + 1) % len(
                        self.experiment_suite.get_experiments())
                self._load_experiment(self.current_experiment_idx)

            self.current_start_position_idx = self.current_experiment.poses[
                self.current_pose][0]
            self.current_goal = self.positions[self.current_experiment.poses[
                self.current_pose][1]]
            self.current_pose += 1
        else:
            # go over all the possible positions in a cyclic manner
            self.current_start_position_idx = (
                self.current_start_position_idx + 1) % self.num_positions

            # choose a random goal destination
            self.current_goal = random.choice(self.positions)

        try:
            self.game.start_episode(self.current_start_position_idx)
        except:
            self.game.connect()
            self.game.start_episode(self.current_start_position_idx)

        # start the game with some initial speed
        for i in range(self.num_speedup_steps):
            self.control = VehicleControl(throttle=1.0,
                                          brake=0,
                                          steer=0,
                                          hand_brake=False,
                                          reverse=False)
            self.game.send_control(self.control)

    def get_rendered_image(self) -> np.ndarray:
        """
        Return a numpy array containing the image that will be rendered to the screen.
        This can be different from the observation. For example, mujoco's observation is a measurements vector.
        :return: numpy array containing the image that will be rendered to the screen
        """
        image = [self.state[camera.name] for camera in self.scene.sensors]
        image = np.vstack(image)
        return image
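CarlaEnvironment.__init__ above picks the server port via get_open_port(), which is not shown. A minimal sketch, assuming the helper simply asks the operating system for a free TCP port:

import socket

def get_open_port():
    # Bind to port 0 so the OS picks a free port, read it back, then release the socket.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('', 0))
    port = s.getsockname()[1]
    s.close()
    return port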
Example no. 4
0
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG, enable_autopilot=False):
        self.enable_autopilot = enable_autopilot
        self.config = config
        self.config["x_res"], self.config["y_res"] = IMG_SIZE
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)
            self.intersections_node = self.planner._city_track._map.get_intersection_nodes(
            )
            self.intersections_pos = np.array([
                self.planner._city_track._map.convert_to_world(
                    intersection_node)
                for intersection_node in self.intersections_node
            ])
            self.pre_intersection = np.array([0.0, 0.0])

            # # Cartesian coordinates
            self.headings = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
            # self.lrs_matrix = {"GO_STRAIGHT": np.array([[1,0],[0,1]]),\
            #                    "TURN_RIGHT": np.array([[0,-1],[1,0]]),\
            #                    "TURN_LEFT": np.array([[0,1],[-1,0]])}
            # self.goal_heading = np.array([0.0,0.0])
            # self.current_heading = None
            # self.pre_heading = None
            # self.angular_speed = None

            # Angular degree
            self.headings_degree = np.array(
                [0.0, 180.0, 90.0, -90.0])  # one-one mapping to self.headings
            self.lrs_degree = {
                "GO_STRAIGHT": 0.0,
                "TURN_LEFT": -90.0,
                "TURN_RIGHT": 90.0
            }
            self.goal_heading_degree = 0.0
            self.current_heading_degree = None
            self.pre_heading_degree = None
            self.angular_speed_degree = np.array(0.0)

        # The Action Space
        if config["discrete_actions"]:
            self.action_space = Discrete(
                len(DISCRETE_ACTIONS
                    ))  # It will be transformed to continuous 2D action.
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ),
                                    dtype=np.float32)  # 2D action.

        if config["use_sensor"] == 'use_semantic':
            image_space = Box(0.0,
                              255.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        elif config["use_sensor"] in ['use_depth', 'use_logdepth']:
            image_space = Box(0.0,
                              255.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        elif config["use_sensor"] == 'use_rgb':
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     3 * config["framestack"]),
                              dtype=np.uint8)
        elif config["use_sensor"] == 'use_2rgb':
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     2 * 3 * config["framestack"]),
                              dtype=np.uint8)

        # The Observation Space
        self.observation_space = Tuple([
            image_space,
            Discrete(len(COMMANDS_ENUM)),  # next_command
            Box(-128.0, 128.0, shape=(2, ),
                dtype=np.float32)  # forward_speed, dist to goal
        ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

        self.cnt1 = None
        self.displacement = None
        self.next_command = "LANE_FOLLOW"

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY,
                self.config["server_map"],
                "-windowed",
                "-ResX=480",
                "-ResY=360",
                "-carla-server",
                "-benchmark -fps=10",  # "-benchmark -fps=10": to run the simulation at a fixed time-step of 0.1 seconds
                "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=open(os.devnull, "w")
        )  #  ResourceWarning: unclosed file <_io.TextIOWrapper name='/dev/null' mode='w' encoding='UTF-8'>
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        # Called when the instance of the class is deleted (its memory is freed).
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    # @set_timeout(15)
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        self.pre_intersection = np.array([0.0, 0.0])

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_sensor"] == 'use_semantic':
            camera0 = Camera("CameraSemantic",
                             PostProcessing="SemanticSegmentation")
            camera0.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera0.set_position(30, 0, 130)
            camera0.set(FOV=120)
            camera0.set_position(2.0, 0.0, 1.4)
            camera0.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera0)

        if self.config["use_sensor"] in ['use_depth', 'use_logdepth']:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set(FOV=120)
            camera1.set_position(2.0, 0.0, 1.4)
            camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        if self.config["use_sensor"] == 'use_rgb':
            camera2 = Camera("CameraRGB")
            camera2.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera2.set_position(30, 0, 130)
            # camera2.set_position(0.3, 0.0, 1.3)
            camera2.set(FOV=120)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera2)

        if self.config["use_sensor"] == 'use_2rgb':
            camera_l = Camera("CameraRGB_L")
            camera_l.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_l.set(FOV=120)
            camera_l.set_position(2.0, -0.1, 1.4)
            camera_l.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_l)

            camera_r = Camera("CameraRGB_R")
            camera_r.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_r.set(FOV=120)
            camera_r.set_position(2.0, 0.1, 1.4)
            camera_r.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_r)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        self.positions = positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]

        self.pre_pos = self.start_pos.location
        self.cnt1 = 0
        self.displacement = 1000.0

        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 1, self.start_pos.location.y // 1
        ]
        self.end_coord = [
            self.end_pos.location.x // 1, self.end_pos.location.y // 1
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # remove the vehicle dropping when starting a new episode.
        cnt = 1
        z1 = 0
        zero_control = VehicleControl()
        while (cnt < 3):
            self.client.send_control(
                zero_control
            )  # VehicleControl().steer = 0, VehicleControl().throttle = 0, VehicleControl().reverse = False
            z0 = z1
            z1 = self.client.read_data(
            )[0].player_measurements.transform.location.z
            print(z1)
            if z0 - z1 == 0:
                cnt += 1
        print('Starting new episode done.\n')

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements

        # self.current_heading = self.pre_heading = np.array([py_measurements["x_orient"], py_measurements["y_orient"]])
        # self.angular_speed = 0.0

        self.pre_heading_degree = self.current_heading_degree = py_measurements[
            "current_heading_degree"]
        self.angular_speed_degree = np.array(0.0)

        return self.encode_obs(self.preprocess_image(image),
                               py_measurements), py_measurements

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        # obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
        #     py_measurements["forward_speed"],
        #     py_measurements["distance_to_goal"]
        # ])
        obs = image
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, self.prev_measurement)

    def delta_degree(self, deltaxy):
        return deltaxy if np.abs(
            deltaxy) < 180 else deltaxy - np.sign(deltaxy) * 360

    # image, py_measurements = self._read_observation()  --->  self.preprocess_image(image)   --->  step observation output
    # @set_timeout(10)
    def _step(self, action):

        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]  # Carla action is 2D.
        assert len(action) == 2, "Invalid action {}".format(action)

        if self.enable_autopilot:
            # Autopilot braking maps to a negative action[0]; otherwise pass the throttle through.
            action[0] = -self.autopilot.brake if self.autopilot.brake > 0 else self.autopilot.throttle
            action[1] = self.autopilot.steer
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))

        # reverse and hand_brake are disabled.
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        # print(self.client)
        self.client.send_control(steer=steer,
                                 throttle=throttle,
                                 brake=brake,
                                 hand_brake=hand_brake,
                                 reverse=reverse)

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        print("Next command", py_measurements["next_command"])
        print("dist_to_intersection:", py_measurements["dist_to_intersection"])

        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }

        # compute angular_speed
        self.current_heading_degree = py_measurements["current_heading_degree"]
        self.angular_speed_degree = np.array(
            self.delta_degree(self.current_heading_degree -
                              self.pre_heading_degree))
        self.pre_heading_degree = py_measurements["current_heading_degree"]
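        # angular_speed_degree is thus the per-step change in yaw (degrees),
        # wrapped by delta_degree so that crossing the +/-180 boundary does not
        # produce a spurious ~360 degree jump.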

        # compute reward
        reward = compute_reward(self, self.prev_measurement, py_measurements)

        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward

        # done or not
        # done = False
        # done = self.cnt1 > 50 and (py_measurements["collision_vehicles"] or py_measurements["collision_pedestrians"] or py_measurements["collision_other"] or self.displacement < 0.5)
        done = self.cnt1 > 50 and self.displacement < 0.2

        # done = (self.num_steps > self.scenario["max_steps"]
        #         or py_measurements["next_command"] == "REACH_GOAL" or py_measurements["intersection_offroad"] or py_measurements["intersection_otherlane"]
        #         or (self.config["early_terminate_on_collision"]
        #             and collided_done(py_measurements)))

        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if self.config["verbose"] and CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
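        # The command below stitches the per-frame JPEGs of this episode into
        # an H.264 MP4 at 60 fps and then deletes the source frames;
        # "-loglevel -8" corresponds to ffmpeg's quiet level.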
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(x_res=self.config["render_x_res"],
                 y_res=self.config["render_y_res"],
                 vid=os.path.join(videos_dir, self.episode_id),
                 img=os.path.join(CARLA_OUT_PATH, "CameraRGB",
                                  self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):

        if self.config[
                "use_sensor"] == 'use_semantic':  # image.data: uint8(0 ~ 12)
            data = image.data * 21  # data: uint8(0 ~ 255)
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = np.expand_dims(
                data, 2)  # data: uint8(0 ~ 255),  shape(y_res, x_res, 1)

        elif self.config["use_sensor"] == 'use_depth':  # depth: float64(0 ~ 1)
            # data = (image.data - 0.5) * 2
            data = image.data * 255  # data: float64(0 ~ 255)
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"],
                                1)  # shape(render_y_res,render_x_res,1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)  # shape(y_res, x_res)
            data = np.expand_dims(
                data, 2)  # data: float64(0 ~ 255),  shape(y_res, x_res, 1)

        elif self.config[
                "use_sensor"] == 'use_logdepth':  # depth: float64(0 ~ 1)
            data = (np.log(image.data) +
                    7.0) * 255.0 / 7.0  # data: float64(0 ~ 255)
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"],
                                1)  # shape(render_y_res,render_x_res,1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)  # shape(y_res, x_res)
            data = np.expand_dims(
                data, 2)  # data: float64(0 ~ 255),  shape(y_res, x_res, 1)
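            # The log transform spreads the near-field depth resolution: depths
            # in roughly (e^-7, 1] map to (0, 255], so nearby obstacles occupy
            # most of the output range instead of being squashed near zero.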

        elif self.config["use_sensor"] == 'use_rgb':
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(
                data,
                (self.config["x_res"], self.config["y_res"]
                 ),  # data: uint8(0 ~ 255),  shape(y_res, x_res, 3)
                interpolation=cv2.INTER_AREA)
            # data = (data.astype(np.float32) - 128) / 128

        elif self.config["use_sensor"] == 'use_2rgb':
            data_l, data_r = image[0].data, image[1].data
            data_l = data_l.reshape(self.config["render_y_res"],
                                    self.config["render_x_res"], 3)
            data_r = data_r.reshape(self.config["render_y_res"],
                                    self.config["render_x_res"], 3)
            data_l = cv2.resize(data_l,
                                (self.config["x_res"], self.config["y_res"]),
                                interpolation=cv2.INTER_AREA)
            data_r = cv2.resize(data_r,
                                (self.config["x_res"], self.config["y_res"]),
                                interpolation=cv2.INTER_AREA)

            data = np.concatenate(
                (data_l, data_r),
                axis=2)  # data: uint8(0 ~ 255),  shape(y_res, x_res, 6)

        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        if self.enable_autopilot:
            self.autopilot = measurements.player_measurements.autopilot_control

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None

        if self.config["use_sensor"] == 'use_semantic':
            camera_name = "CameraSemantic"

        elif self.config["use_sensor"] in ['use_depth', 'use_logdepth']:
            camera_name = "CameraDepth"

        elif self.config["use_sensor"] == 'use_rgb':
            camera_name = "CameraRGB"

        elif self.config["use_sensor"] == 'use_2rgb':
            camera_name = ["CameraRGB_L", "CameraRGB_R"]

        # for name, image in sensor_data.items():
        #     if name == camera_name:
        #         observation = image
        if camera_name == ["CameraRGB_L", "CameraRGB_R"]:
            observation = [
                sensor_data["CameraRGB_L"], sensor_data["CameraRGB_R"]
            ]
        else:
            observation = sensor_data[camera_name]

        cur = measurements.player_measurements

        if self.config["enable_planner"]:

            # modify next_command
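            # Look roughly 5 m ahead of the vehicle along its orientation and
            # measure the distance from that point to every known intersection;
            # the planner command is only refreshed while this look-ahead point
            # is within 18 m of an intersection.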
            current_pos = np.array([cur.transform.location.x, cur.transform.location.y])\
                + np.array([cur.transform.orientation.x, cur.transform.orientation.y]) * 5.0
            dist_intersection_current_pos = np.linalg.norm(
                self.intersections_pos[:, :2] - current_pos, axis=1)
            is_near_intersection = np.min(dist_intersection_current_pos) < 18.0
            if not is_near_intersection:
                self.next_command = "LANE_FOLLOW"

            # goal_heading
            if is_near_intersection:
                cur_intersection = self.intersections_pos[
                    dist_intersection_current_pos.argmin(), :2]
                self.dist_to_intersection = np.linalg.norm(cur_intersection -
                                                           current_pos)
            else:
                self.goal_heading_degree = 0.0
                self.dist_to_intersection = 1000.0
            if is_near_intersection and np.linalg.norm(self.pre_intersection -
                                                       cur_intersection) > 0.1:
                self.next_command = COMMANDS_ENUM[self.planner.get_next_command(
                    [cur.transform.location.x, cur.transform.location.y, GROUND_Z], \
                    [cur.transform.orientation.x, cur.transform.orientation.y, GROUND_Z], \
                    [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], \
                    [self.end_pos.orientation.x, self.end_pos.orientation.y, GROUND_Z] \
                    )]
                if self.next_command == "LANE_FOLLOW":
                    self.next_command = random.choice(
                        ["GO_STRAIGHT", "TURN_LEFT", "TURN_RIGHT"])
                cur_heading0 = cur_intersection - current_pos
                cur_heading_1 = cur_heading0 / np.linalg.norm(cur_heading0)
                cur_heading_degree = self.headings_degree[np.linalg.norm(
                    cur_heading_1 - self.headings, axis=1).argmin()]
                self.goal_heading_degree = self.delta_degree(
                    cur_heading_degree + self.lrs_degree[self.next_command])
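                # The heading toward the intersection is snapped to the nearest
                # canonical direction in self.headings, and the turn offset in
                # self.lrs_degree for the sampled command is added to obtain
                # the goal heading after the intersection.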

                self.pre_intersection = cur_intersection

        else:
            # Without the planner, default to lane following; is_near_intersection
            # must still be defined because it is written into py_measurements below.
            is_near_intersection = False
            self.next_command = "LANE_FOLLOW"

        if self.next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
            self.end_pos = self.positions[random.choice(
                self.config["scenarios"])['end_pos_id']]

        # elif self.config["enable_planner"]:
        #     distance_to_goal = self.planner.get_shortest_path_distance([
        #         cur.transform.location.x, cur.transform.location.y, GROUND_Z
        #     ], [
        #         cur.transform.orientation.x, cur.transform.orientation.y,
        #         GROUND_Z
        #     ], [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
        #         self.end_pos.orientation.x, self.end_pos.orientation.y,
        #         GROUND_Z
        #     ])
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([  # default norm: L2 norm
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]))

        # displacement
        if self.cnt1 > 70 and self.cnt1 % 30 == 0:
            self.displacement = float(
                np.linalg.norm([
                    cur.transform.location.x - self.pre_pos.x,
                    cur.transform.location.y - self.pre_pos.y
                ]))
            self.pre_pos = cur.transform.location
        self.cnt1 += 1

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": np.array(cur.forward_speed),  ###
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "collided": collision_(cur),  ###
            "intersection_offroad": np.array(cur.intersection_offroad),  ###
            "intersection_otherlane":
            np.array(cur.intersection_otherlane),  ###
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": self.next_command,
            "next_command_id": COMMAND_ORDINAL[self.next_command],
            "displacement": self.displacement,
            "is_near_intersection": is_near_intersection,
            "goal_heading_degree": self.goal_heading_degree,
            "angular_speed_degree": self.angular_speed_degree,  ###
            "current_heading_degree":
            cur.transform.rotation.yaw,  # left-, right+, (-180 ~ 180) degrees
            "dist_to_intersection": self.dist_to_intersection
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file,
                                  image.data)  # image.data without preprocess.

        assert observation is not None, sensor_data
        return observation, py_measurements
Esempio n. 5
0
class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with an experiment Suite.


    The benchmark class must be inherited by a class that defines all
    the experiments to be run by the agent.
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):
        """
        Args
            city_name:
            name_to_save:
            continue_experiment:
            save_images:
            distance_for_success:
            collisions_as_failure: if this flag is set to true, episodes will terminate as failure, when the car collides.
        """

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance for arriving at the goal point in
        # order to consider it a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        # To keep track of the previous collisions
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark.
        If it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite to run.
            agent: an agent object with the run_step method implemented.
            client: the CARLA client used to communicate with the simulator.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Log the start and end positions of this episode.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, col_ped, col_veh, col_oth) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), experiment_suite.metrics_parameters,
                            experiment_suite.collision_as_failure)

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result,
                        col_ped, col_veh, col_oth)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level planner commands) needed to reach a certain goal.
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network.
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _has_agent_collided(self, measurement, metrics_parameters):
        """
            This function keeps internal state and only looks at one measurement at a time.
        """
        collided_veh = 0
        collided_ped = 0
        collided_oth = 0
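        # A collision is flagged when the accumulated collision intensity
        # reported by CARLA has grown by more than half of the corresponding
        # metrics threshold since the previous call; the previous values are
        # stored so the same collision is not counted twice.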

        if (measurement.collision_vehicles - self._previous_vehicle_collision) \
                > metrics_parameters['collision_vehicles']['threshold']/2.0:
            collided_veh = 1
        if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \
                > metrics_parameters['collision_pedestrians']['threshold']/2.0:
            collided_ped = 1
        if (measurement.collision_other - self._previous_other_collision) \
                > metrics_parameters['collision_other']['threshold']/2.0:
            collided_oth = 1

        self._previous_pedestrian_collision = measurement.collision_pedestrians
        self._previous_vehicle_collision = measurement.collision_vehicles
        self._previous_other_collision = measurement.collision_other

        return collided_ped, collided_veh, collided_oth

    def _is_agent_stuck(self, measurements, stuck_vec, old_coll):
        # break the episode when the agent is stuck on a static object
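        # stuck_vec is a sliding window over the last 60 frames: a 1 is pushed
        # whenever the car is slow (<= 4 km/h) and off-road, on the other lane,
        # or colliding with a static object; the episode only counts as stuck
        # once the whole window is filled with 1s.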
        coll_other = measurements.collision_other
        coll_other -= old_coll
        otherlane = measurements.intersection_otherlane > 0.4
        offroad = measurements.intersection_offroad > 0.3
        logging.info(
            "offroad: {}, otherlane: {}, coll_other: {}, old_coll: {}".format(
                offroad, otherlane, coll_other, old_coll))

        # if still driving or got unstuck (v > 4km/h)
        if measurements.forward_speed * 3.6 > 4:
            cycle_signal(stuck_vec, 0)
            if coll_other:
                old_coll += coll_other
        elif offroad or otherlane or coll_other:
            cycle_signal(stuck_vec, 1)
        else:
            cycle_signal(stuck_vec, 0)

        return all(stuck_vec), stuck_vec, old_coll

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, metrics_parameters,
                                collision_as_failure):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
            metrics_parameters: the metric thresholds used to check for collisions
            collision_as_failure: whether a collision terminates the episode as a failure

        """

        # Send an initial command.
        time.sleep(2)
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        ### Reset CAL agent
        agent.reset_state()

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        col_ped, col_veh, col_oth = 0, 0, 0
        fail = False
        success = False

        ### own metrics
        stuck_vec = [0] * 60  # measure for 60 frames (6 seconds)
        center_distance_vec = []
        old_collision_value = 0
        direction_vec = []

        while not fail and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # Agent process the data.
            control = agent.run_step(measurements, sensor_data, directions,
                                     target)

            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now

            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Distance to target: %f', float(distance))

            # Check if reach the target
            col_ped, col_veh, col_oth = self._has_agent_collided(
                measurements.player_measurements, metrics_parameters)

            ### CHANGE TO ORIGINAL CODE #####################
            is_stuck, stuck_vec, old_collision_value = self._is_agent_stuck(
                measurements.player_measurements, stuck_vec,
                old_collision_value)

            if distance < self._distance_for_success:
                success = True
            elif (current_timestamp - initial_timestamp) > (time_out * 1000):
                fail = True
            elif is_stuck:
                fail = True
            elif collision_as_failure and (col_ped or col_veh or col_oth):
                fail = True

            time_remain = (time_out * 1000 -
                           (current_timestamp - initial_timestamp)) / 1000
            logging.info('Time remaining: %i m %i s', time_remain / 60,
                         time_remain % 60)
            logging.info('')

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp
            ) / 1000.0, distance, col_ped, col_veh, col_oth
        return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth
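
# A minimal usage sketch (added note; the agent object and experiment suite
# below are placeholders, not part of the original example):
#
#     from carla.client import make_carla_client
#
#     with make_carla_client('localhost', 2000) as client:
#         benchmark = DrivingBenchmark(city_name='Town01', name_to_save='Test')
#         results = benchmark.benchmark_agent(CoRL2017('Town01'), my_agent, client)
#         print(results)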
Esempio n. 6
0
class CarlaEnvironmentWrapper(EnvironmentWrapper):
    def __init__(self,
                 num_speedup_steps=30,
                 require_explicit_reset=True,
                 is_render_enabled=False,
                 early_termination_enabled=False,
                 run_offscreen=False,
                 save_screens=False,
                 port=2000,
                 gpu=0,
                 discrete_control=True,
                 kill_when_connection_lost=True,
                 city_name="Town01",
                 channel_last=True):
        EnvironmentWrapper.__init__(self, is_render_enabled, save_screens)

        print("port:", port)

        self.episode_max_time = 1000000
        self.allow_braking = True
        self.log_path = os.path.join(DEFAULT_CARLA_LOG_DIR, "CarlaLogs.txt")
        self.num_speedup_steps = num_speedup_steps
        self.is_game_ready_for_input = False
        self.run_offscreen = run_offscreen
        self.kill_when_connection_lost = kill_when_connection_lost
        # server configuration

        self.port = port
        self.gpu = gpu
        self.host = 'localhost'
        self.level = 'town1'
        self.map = CarlaLevel().get(self.level)

        # experiment = basic_experiment_suite.BasicExperimentSuite(city_name)
        experiment = CoRL2017(city_name)
        self.experiments = experiment.get_experiments()
        self.experiment_type = 0
        self.planner = Planner(city_name)

        self.car_speed = 0
        self.is_game_setup = False  # Will be true only when setup_client_and_server() is called, either explicitly, or by reset()

        # action space
        self.discrete_controls = discrete_control
        self.action_space_size = 3
        self.action_space_high = np.array([1, 1, 1])
        self.action_space_low = np.array([-1, -1, -1])
        self.action_space_abs_range = np.maximum(
            np.abs(self.action_space_low), np.abs(self.action_space_high))
        self.steering_strength = 0.35
        self.gas_strength = 1.0
        self.brake_strength = 0.6
        self.actions = {
            0: [0., 0.],
            1: [0., -self.steering_strength],
            2: [0., self.steering_strength],
            3: [self.gas_strength - 0.15, 0.],
            4: [-self.brake_strength, 0],
            5: [self.gas_strength - 0.3, -self.steering_strength],
            6: [self.gas_strength - 0.3, self.steering_strength],
            7: [-self.brake_strength, -self.steering_strength],
            8: [-self.brake_strength, self.steering_strength]
        }
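        # Each discrete action maps to [forward_command, steer]: a positive
        # first element is later clipped into throttle and a negative one is
        # turned into brake (see the 2-element branch of _take_action).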
        self.actions_description = [
            'NO-OP', 'TURN_LEFT', 'TURN_RIGHT', 'GAS', 'BRAKE',
            'GAS_AND_TURN_LEFT', 'GAS_AND_TURN_RIGHT', 'BRAKE_AND_TURN_LEFT',
            'BRAKE_AND_TURN_RIGHT'
        ]
        if discrete_control:
            self.action_space = Discrete(len(self.actions))
        else:
            self.action_space = Box(low=self.action_space_low,
                                    high=self.action_space_high)
        self.observation_space = Box(low=-np.inf,
                                     high=np.inf,
                                     shape=[88, 200, 3])

        # measurements
        self.measurements_size = (1, )

        self.pre_image = None
        self.first_debug = True
        self.channel_last = channel_last

    def setup_client_and_server(self, reconnect_client_only=False):
        # open the server
        if not reconnect_client_only:
            self.server, self.out = self._open_server()
            self.server_pid = self.server.pid  # To kill in case the child process gets lost
            print("setup server, out:", self.server_pid, "dockerid", self.out)

        while True:
            try:
                self.game = CarlaClient(self.host, self.port, timeout=99999999)
                self.game.connect(
                    connection_attempts=100
                )  # The server process can take a long time to spawn, so the client needs to retry a sufficient number of times.
                self.game.load_settings(CarlaSettings())
                self.game.start_episode(0)

                self.is_game_setup = self.server and self.game

                print("setup client")
                # self.out = shell("docker ps -q")
                # print("dockerid", self.out)

                return
            except TCPConnectionError as error:
                print(error)
                time.sleep(1)

    def close_client_and_server(self):
        self._close_server()
        print("Disconnecting the client")
        self.game.disconnect()
        self.game = None
        self.server = None
        self.is_game_setup = False
        return

    def _open_server(self):
        # Note: There is no way to disable rendering in CARLA as of now
        # https://github.com/carla-simulator/carla/issues/286
        # decrease the window resolution if you want to see if performance increases
        # Command: $CARLA_ROOT/CarlaUE4.sh /Game/Maps/Town02 -benchmark -carla-server -fps=15 -world-port=9876 -windowed -ResX=480 -ResY=360 -carla-no-hud
        # To run off_screen: SDL_VIDEODRIVER=offscreen SDL_HINT_CUDA_DEVICE=0 <command> #https://github.com/carla-simulator/carla/issues/225
        my_env = None
        if self.run_offscreen:
            my_env = {
                **os.environ,
                'SDL_VIDEODRIVER': 'offscreen',
                'SDL_HINT_CUDA_DEVICE': '0',
            }
        with open(self.log_path, "wb") as out:
            #docker <19.03
            # p = subprocess.Popen(['docker', 'run', '--rm', '-d', '-p',
            #                        str(self.port) + '-' + str(self.port + 2) + ':' + str(self.port) + '-' + str(self.port + 2),
            #                        '--runtime=nvidia', '-e', 'NVIDIA_VISIBLE_DEVICES='+str(self.gpu), "carlasim/carla:0.8.4",
            #                        '/bin/bash', 'CarlaUE4.sh', self.map, '-windowed',
            #                        '-benchmark', '-fps=10', '-world-port=' + str(self.port)], shell=False,
            #                       stdout=subprocess.PIPE)

            #docker=19.03
            p = subprocess.Popen([
                'docker', 'run', '--rm', '-d', '-p',
                str(self.port) + '-' + str(self.port + 2) + ':' +
                str(self.port) + '-' + str(self.port + 2),
                '--gpus=' + str(self.gpu), "carlasim/carla:0.8.4", '/bin/bash',
                'CarlaUE4.sh', self.map, '-windowed', '-benchmark', '-fps=10',
                '-world-port=' + str(self.port)
            ],
                                 shell=False,
                                 stdout=subprocess.PIPE)

            # docker_id = shell("docker ps -q")
            docker_out, err = p.communicate()

            # cmd = [path.join(environ.get('CARLA_ROOT'), 'CarlaUE4.sh'), self.map,
            #        "-benchmark", "-carla-server", "-fps=10", "-world-port={}".format(self.port),
            #        "-windowed -ResX={} -ResY={}".format(carla_config.server_width, carla_config.server_height),
            #        "-carla-no-hud"]
            # p = subprocess.Popen(cmd, stdout=out, stderr=out, env=my_env, preexec_fn=os.setsid)
        # p = subprocess.Popen(cmd, env=my_env, preexec_fn=os.setsid)
        return p, docker_out

    def _close_server(self):
        if self.kill_when_connection_lost:
            print("kill before")
            # os.kill(os.getpgid(self.server.pid), signal.SIGKILL)
            # self.server.kill()
            print(self.out)
            subprocess.call(['docker', 'stop', self.out[:-1]])

            while True:
                cmd = shell("docker ps -q")
                if cmd == "":
                    break

            print("kill after")
            return
        # no_of_attempts = 0
        # while is_process_alive(self.server_pid):
        #     try:
        #         self.server.terminate()
        #         self.server.wait()
        #         os.killpg(self.server.pid, signal.SIGTERM)
        #         time.sleep(10)
        #     except Exception as error:
        #         print(error)

    def _get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level planner commands) needed to reach a certain goal.
        """

        directions = self.planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _update_state(self):
        # get measurements and observations
        measurements = []
        while type(measurements) == list:
            try:
                measurements, sensor_data = self.game.read_data()

            except Exception:
                print(
                    "Connection to server lost while reading state. Reconnecting..."
                )
                # self.game.disconnect()
                # self.setup_client_and_server(reconnect_client_only=True)
                import psutil
                info = psutil.virtual_memory()
                print("memory used", str(info.percent))

                self.close_client_and_server()
                self.setup_client_and_server(reconnect_client_only=False)

                scene = self.game.load_settings(self.cur_experiment.conditions)
                self.positions = scene.player_start_spots
                self.start_point = self.positions[self.pose[0]]
                self.end_point = self.positions[self.pose[1]]
                self.game.start_episode(self.pose[0])

                self.done = True

        current_point = measurements.player_measurements.transform
        direction = self._get_directions(current_point, self.end_point)
        speed = measurements.player_measurements.forward_speed

        self.reward = 0
        dist = sldist([current_point.location.x, current_point.location.y],
                      [self.end_point.location.x, self.end_point.location.y])
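        # Reward shaping by planner command (CARLA 0.8.4 codes: 0 REACH_GOAL,
        # 2 LANE_FOLLOW, 3 TURN_LEFT, 4 TURN_RIGHT, 5 GO_STRAIGHT): speed is
        # rewarded up to ~35 km/h when going straight or following the lane,
        # steering against the commanded turn is penalised, and turning speed
        # is encouraged to stay around 20 km/h.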

        if direction == 5:  #go straight
            if abs(self.control.steer) > 0.2:
                self.reward -= 20

            self.reward += min(35, speed * 3.6)
        elif direction == 2:  #follow lane
            self.reward += min(35, speed * 3.6)
        elif direction == 3:  # turn left, steer negative
            if self.control.steer > 0:
                self.reward -= 15
            if speed * 3.6 <= 20:
                self.reward += speed * 3.6
            else:
                self.reward += 40 - speed * 3.6
        elif direction == 4:  #turn right
            if self.control.steer < 0:
                self.reward -= 15
            if speed * 3.6 <= 20:
                self.reward += speed * 3.6
            else:
                self.reward += 40 - speed * 3.6
        elif direction == 0:
            self.done = True
            self.reward += 100
            direction = 2
            print("success", dist)
        else:
            print("error direction")
            direction = 5

        direction -= 2
        direction = int(direction)

        intersection_offroad = measurements.player_measurements.intersection_offroad
        intersection_otherlane = measurements.player_measurements.intersection_otherlane
        collision_veh = measurements.player_measurements.collision_vehicles
        collision_ped = measurements.player_measurements.collision_pedestrians
        collision_other = measurements.player_measurements.collision_other

        if intersection_otherlane > 0 or intersection_offroad > 0 or collision_ped > 0 or collision_veh > 0:
            self.reward -= 100
        elif collision_other > 0:
            self.reward -= 50

        if collision_other > 0 or collision_veh > 0 or collision_ped > 0:
            self.done = True
        if intersection_offroad > 0.2 or intersection_otherlane > 0.2:
            self.done = True
        if speed * 3.6 <= 1:
            self.nospeed_time += 1
            if self.nospeed_time > 100:
                self.done = True
            self.reward -= 1
        else:
            self.nospeed_time = 0

        # speed = min(1, speed/10.0)
        speed = speed / 25

        # print(measurements)
        # print(sensor_data)
        self.observation = {
            "img": self.process_image(sensor_data['CameraRGB'].data),
            "speed": speed,
            "direction": direction
        }
        return self.observation, self.reward, self.done

    def encode_obs(self, image):
        if self.pre_image is None:
            self.pre_image = image

        img = np.concatenate([self.pre_image, image], axis=2)
        self.pre_image = image
        return img

    def process_image(self, data):
        rgb_img = data[115:510, :]
        rgb_img = cv2.resize(rgb_img, (200, 88), interpolation=cv2.INTER_AREA)
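        # The crop above (rows 115:510) presumably removes sky and hood pixels
        # at the default render resolution, and the 200x88 resize matches the
        # observation_space shape [88, 200, 3] declared in __init__.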
        if not self.channel_last:
            rgb_img = np.transpose(rgb_img, (2, 0, 1))
        # return cv2.resize(
        #     data, (80, 80),
        #     interpolation=cv2.INTER_AREA)
        return rgb_img

    def _take_action(self, action_idx):
        if not self.is_game_setup:
            print("Reset the environment duh by reset() before calling step()")
            sys.exit(1)
        if isinstance(action_idx, (int, np.integer)):
            action = self.actions[action_idx]
        else:
            action = action_idx

        self.control = VehicleControl()
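        # Two action layouts are supported below: a 3-element
        # [steer, throttle, brake] vector, or a 2-element [forward, steer]
        # vector where a negative forward value is interpreted as brake.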

        if len(action) == 3:
            self.control.throttle = np.clip(action[1], 0, 1)
            self.control.steer = np.clip(action[0], -1, 1)
            self.control.brake = np.clip(action[2], 0, 1)
        else:
            self.control.throttle = np.clip(action[0], 0, 1)
            self.control.steer = np.clip(action[1], -1, 1)
            self.control.brake = np.abs(np.clip(action[0], -1, 0))

        if not self.allow_braking:
            self.control.brake = 0
        self.control.hand_brake = False
        self.control.reverse = False
        controls_sent = False
        while not controls_sent:
            try:
                self.game.send_control(self.control)
                controls_sent = True
                # print(self.control)
                # #
                # rand = random.randint(0, 4)
                # if rand == 0 and self.first_debug:
                #     self.first_debug = False
                #     raise Exception
            except Exception:
                print(
                    "Connection to server lost while sending controls. Reconnecting..."
                )
                import psutil
                info = psutil.virtual_memory()
                print("memory used", str(info.percent))

                self.close_client_and_server()
                self.setup_client_and_server(reconnect_client_only=False)

                scene = self.game.load_settings(self.cur_experiment.conditions)
                self.positions = scene.player_start_spots
                self.start_point = self.positions[self.pose[0]]
                self.end_point = self.positions[self.pose[1]]
                self.game.start_episode(self.pose[0])

                self.done = True
                controls_sent = False
        return

    def _restart_environment_episode(self, force_environment_reset=True):

        if not force_environment_reset and not self.done and self.is_game_setup:
            print("Can't reset dude, episode ain't over yet")
            return None  # User should handle this
        self.is_game_ready_for_input = False
        if not self.is_game_setup:
            self.setup_client_and_server()

        experiment_type = random.randint(0, 0)
        self.cur_experiment = self.experiments[experiment_type]
        self.pose = random.choice(self.cur_experiment.poses)
        # self.cur_experiment = self.experiments[0]
        # self.pose = self.cur_experiment.poses[0]
        scene = self.game.load_settings(self.cur_experiment.conditions)
        self.positions = scene.player_start_spots
        self.start_point = self.positions[self.pose[0]]
        self.end_point = self.positions[self.pose[1]]

        # print("start episode")
        while True:
            try:
                self.game.start_episode(self.pose[0])

                # start the game with some initial speed
                self.car_speed = 0
                self.nospeed_time = 0
                observation = None
                for i in range(self.num_speedup_steps):
                    observation, reward, done, _ = self.step([0, 1.0, 0])
                self.observation = observation
                self.is_game_ready_for_input = True
                break

            except Exception as error:
                print(error)
                self.game.connect()
                self.game.start_episode(self.pose[0])

        return observation
Esempio n. 7
0
class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with an experiment Suite.


    The benchmark class must be inherited by a class that defines all
    the experiments to be run by the agent.
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):
        """
        Args
            city_name:
            name_to_save:
            continue_experiment:
            save_images:
            distance_for_success:
            collisions_as_failure: if this flag is set to true, episodes will terminate as failure, when the car collides.
        """

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance for arriving at the goal point in
        # order to consider it a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)
        self._map = self._planner._city_track.get_map()

        # To keep track of the previous collisions
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark.
        If it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite to run.
            agent: an agent object with the run_step method implemented.
            client: the CARLA client used to communicate with the simulator.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Log the start and end positions of this episode.
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    logging.info('Timeout for Episode: %f', time_out)
                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, col_ped,
                     col_veh, col_oth, number_of_red_lights, number_of_green_lights) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), experiment_suite.metrics_parameters,
                            experiment_suite.collision_as_failure,
                            experiment_suite.traffic_light_as_failure)

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result,
                        col_ped, col_veh, col_oth, number_of_red_lights,
                        number_of_green_lights)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level planner commands) needed to reach a certain goal.
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network.
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _has_agent_collided(self, measurement, metrics_parameters):
        """
            This function keeps internal state and only looks at one measurement at a time.
        """
        collided_veh = 0
        collided_ped = 0
        collided_oth = 0

        if (measurement.collision_vehicles - self._previous_vehicle_collision) \
                > metrics_parameters['collision_vehicles']['threshold']/2.0:
            collided_veh = 1
        if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \
                > metrics_parameters['collision_pedestrians']['threshold']/2.0:
            collided_ped = 1
        if (measurement.collision_other - self._previous_other_collision) \
                > metrics_parameters['collision_other']['threshold']/2.0:
            collided_oth = 1

        self._previous_pedestrian_collision = measurement.collision_pedestrians
        self._previous_vehicle_collision = measurement.collision_vehicles
        self._previous_other_collision = measurement.collision_other

        return collided_ped, collided_veh, collided_oth

    def _is_traffic_light_active(self, agent, orientation):

        x_agent = agent.traffic_light.transform.location.x
        y_agent = agent.traffic_light.transform.location.y

        def search_closest_lane_point(x_agent, y_agent, depth):
            step_size = 4
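            # The traffic light itself does not sit on a lane, so probe the
            # eight surrounding points at 4 m offsets (at most one recursion
            # step) and accept the first probe that lies on a lane whose
            # orientation offset is below 6 degrees.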
            if depth > 1:
                return None
            try:
                degrees = self._map.get_lane_orientation_degrees(
                    [x_agent, y_agent, 38])
                #print (degrees)
            except:
                return None

            if not self._map.is_point_on_lane([x_agent, y_agent, 38]):
                #print (" Not on lane ")
                result = search_closest_lane_point(x_agent + step_size,
                                                   y_agent, depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent,
                                                   y_agent + step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent + step_size,
                                                   y_agent + step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent + step_size,
                                                   y_agent - step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent - step_size,
                                                   y_agent + step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent - step_size,
                                                   y_agent, depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent,
                                                   y_agent - step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent - step_size,
                                                   y_agent - step_size,
                                                   depth + 1)
                if result is not None:
                    return result
            else:
                #print(" ON Lane ")
                if degrees < 6:
                    return [x_agent, y_agent]
                else:
                    return None

        closest_lane_point = search_closest_lane_point(x_agent, y_agent, 0)
        if closest_lane_point is None:
            # No nearby lane point was found for this traffic light; treat it
            # as not active for the agent instead of crashing below.
            return False
        car_direction = math.atan2(orientation.y, orientation.x) + 3.1415
        if car_direction > 6.0:
            car_direction -= 6.0

        return math.fabs(
            car_direction - self._map.get_lane_orientation_degrees(
                [closest_lane_point[0], closest_lane_point[1], 38])) < 1

    def _test_for_traffic_lights(self, measurement):
        """

        This function tests whether the car drove through a traffic light, returning 'red'
        if it crossed a red light, 'green' if it crossed a green light, or None otherwise.

        Args:
            measurement: all the measurements collected by carla 0.8.4

        Returns:
            'red', 'green', or None.
        """
        def is_on_burning_point(_map, location):
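            # "Burning point" here means the stretch of lane just before an
            # intersection: the position a few metres ahead along the lane is
            # still outside the intersection, while a point two steps further
            # ahead is already inside it.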

            # We get the current lane orientation
            ori_x, ori_y = _map.get_lane_orientation(
                [location.x, location.y, 38])

            # We test to walk in direction of the lane
            future_location_x = location.x
            future_location_y = location.y

            for i in range(3):
                future_location_x += ori_x
                future_location_y += ori_y
            # Take a point on an intersection further ahead
            location_on_intersection_x = future_location_x + 2 * ori_x
            location_on_intersection_y = future_location_y + 2 * ori_y

            if not _map.is_point_on_intersection([future_location_x,
                                                  future_location_y,
                                                  38]) and \
                    _map.is_point_on_intersection([location_on_intersection_x,
                                                   location_on_intersection_y,
                                                   38]):
                return True

            return False

        # Check nearest traffic light with the correct orientation state.

        player_x = measurement.player_measurements.transform.location.x
        player_y = measurement.player_measurements.transform.location.y

        # The traffic-light check is only done while the vehicle is NOT yet on the
        # intersection: this is the place to verify a traffic-light burn.

        for agent in measurement.non_player_agents:
            if agent.HasField('traffic_light'):
                if not self._map.is_point_on_intersection(
                    [player_x, player_y, 38]):
                    x_agent = agent.traffic_light.transform.location.x
                    y_agent = agent.traffic_light.transform.location.y
                    tl_vector, tl_dist = get_vec_dist(x_agent, y_agent,
                                                      player_x, player_y)
                    if self._is_traffic_light_active(
                            agent, measurement.player_measurements.transform.
                            orientation):
                        if is_on_burning_point(self._map,
                                               measurement.player_measurements.transform.location)\
                                and tl_dist < 6.0:
                            if agent.traffic_light.state != 0:  # Not green
                                return 'red'
                            else:
                                return 'green'

        return None

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, metrics_parameters,
                                collision_as_failure,
                                traffic_light_as_failure):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
            metrics_parameters: the metric parameters used to check for collisions
            collision_as_failure: if True, any collision terminates the episode as a failure
            traffic_light_as_failure: if True, crossing a red light terminates the episode as a failure

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        col_ped, col_veh, col_oth = 0, 0, 0
        traffic_light_state, number_red_lights, number_green_lights = None, 0, 0
        fail = False
        success = False
        not_count = 0

        while not fail and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # Agent process the data.
            control = agent.run_step(measurements, sensor_data, directions,
                                     target)
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            logging.info('Timestamp %f', current_timestamp)
            # Get the distance travelled until now

            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                         float(distance), current_x, current_y,
                         target.location.x, target.location.y)
            # Check whether the agent has collided
            col_ped, col_veh, col_oth = self._has_agent_collided(
                measurements.player_measurements, metrics_parameters)
            # test if car crossed the traffic light
            traffic_light_state = self._test_for_traffic_lights(measurements)
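            # not_count debounces the counters below: once a crossing is counted,
            # further detections are ignored for the next 20 frames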

            if traffic_light_state == 'red' and not_count == 0:
                number_red_lights += 1
                not_count = 20

            elif traffic_light_state == 'green' and not_count == 0:
                number_green_lights += 1
                not_count = 20

            else:
                not_count -= 1
                not_count = max(0, not_count)

            if distance < self._distance_for_success:
                success = True
            elif (current_timestamp - initial_timestamp) > (time_out * 1000):
                fail = True
            elif collision_as_failure and (col_ped or col_veh or col_oth):
                fail = True
            elif traffic_light_as_failure and traffic_light_state == 'red':
                fail = True
            logging.info('Traffic Lights:')
            logging.info('red %f green %f, total %f', number_red_lights,
                         number_green_lights,
                         number_red_lights + number_green_lights)
            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance,  col_ped, col_veh, col_oth, \
                   number_red_lights, number_green_lights
        return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, \
            number_red_lights, number_green_lights
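
The episode runner above reports its outcome as a flat 10-tuple. The following is a minimal, hypothetical sketch of how a caller could unpack it (summarize_episode is not part of the benchmark; it only illustrates the ordering of the returned values):

def summarize_episode(episode_result):
    # Unpack the 10-tuple returned by _run_navigation_episode above
    (result, measurement_vec, control_vec, episode_time, remaining_distance,
     col_ped, col_veh, col_oth, n_red, n_green) = episode_result
    status = 'SUCCESS' if result == 1 else 'FAIL'
    print('%s after %.1f s, %.1f m from the goal' % (status, episode_time, remaining_distance))
    print('collisions: pedestrians=%d vehicles=%d other=%d' % (col_ped, col_veh, col_oth))
    print('traffic lights crossed: %d red, %d green' % (n_red, n_green))
    return status
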
Example n. 8
0
class CarlaEnv(object):
    '''
        An OpenAI Gym Environment for CARLA.
    '''
    def __init__(self,
                 obs_converter,
                 action_converter,
                 env_id,
                 random_seed=0,
                 exp_suite_name='TrainingSuite',
                 reward_class_name='RewardCarla',
                 host='127.0.0.1',
                 port=2000,
                 city_name='Town01',
                 subset=None,
                 video_every=100,
                 video_dir='./video/',
                 distance_for_success=2.0,
                 benchmark=False):

        self.logger = get_carla_logger()
        self.logger.info('Environment {} running in port {}'.format(
            env_id, port))
        self.host, self.port = host, port
        self.id = env_id
        self._obs_converter = obs_converter
        self.observation_space = obs_converter.get_observation_space()
        self._action_converter = action_converter
        self.action_space = self._action_converter.get_action_space()
        if benchmark:
            self._experiment_suite = getattr(experiment_suites_benchmark,
                                             exp_suite_name)(city_name)
        else:
            self._experiment_suite = getattr(experiment_suites,
                                             exp_suite_name)(city_name, subset)
        self._reward = getattr(rewards, reward_class_name)()
        self._experiments = self._experiment_suite.get_experiments()
        self.subset = subset
        self._make_carla_client(host, port)
        self._distance_for_success = distance_for_success
        self._planner = Planner(city_name)
        self.done = False
        self.last_obs = None
        self.last_distance_to_goal = None
        self.last_direction = None
        self.last_measurements = None
        np.random.seed(random_seed)
        self.video_every = video_every
        self.video_dir = video_dir
        self.video_writer = None
        self._success = False
        self._failure_timeout = False
        self._failure_collision = False
        self.benchmark = benchmark
        self.benchmark_index = [0, 0, 0]
        try:
            if not os.path.isdir(self.video_dir):
                os.makedirs(self.video_dir)
        except OSError:
            pass
        self.steps = 0
        self.num_episodes = 0

    def step(self, action):

        if self.done:
            raise ValueError(
                'self.done should always be False when calling step')

        while True:

            try:
                # Send control
                control = self._action_converter.action_to_control(
                    action, self.last_measurements)
                self._client.send_control(control)

                # Gather the observations (including measurements, sensor and directions)
                measurements, sensor_data = self._client.read_data()
                self.last_measurements = measurements
                current_timestamp = measurements.game_timestamp
                distance_to_goal = self._get_distance_to_goal(
                    measurements, self._target)
                self.last_distance_to_goal = distance_to_goal
                directions = self._get_directions(
                    measurements.player_measurements.transform, self._target)
                self.last_direction = directions
                obs = self._obs_converter.convert(measurements, sensor_data,
                                                  directions, self._target,
                                                  self.id)

                if self.video_writer is not None and self.steps % 2 == 0:
                    self._raster_frame(sensor_data, measurements, directions,
                                       obs)

                self.last_obs = obs

            except CameraException:
                self.logger.debug('Camera Exception in step()')
                obs = self.last_obs
                distance_to_goal = self.last_distance_to_goal
                current_timestamp = self.last_measurements.game_timestamp

            except TCPConnectionError as e:
                self.logger.debug(
                    'TCPConnectionError inside step(): {}'.format(e))
                self.done = True
                return self.last_obs, 0.0, True, {'carla-reward': 0.0}

            break

        # Check if terminal state
        timeout = (current_timestamp -
                   self._initial_timestamp) > (self._time_out * 1000)
        collision, _ = self._is_collision(measurements)
        success = distance_to_goal < self._distance_for_success
        if timeout:
            self.logger.debug('Timeout')
            self._failure_timeout = True
        if collision:
            self.logger.debug('Collision')
            self._failure_collision = True
        if success:
            self.logger.debug('Success')
        self.done = timeout or collision or success

        # Get the reward
        env_state = {
            'timeout': timeout,
            'collision': collision,
            'success': success
        }
        reward = self._reward.get_reward(measurements, self._target,
                                         self.last_direction, control,
                                         env_state)

        # Additional information
        info = {'carla-reward': reward}

        self.steps += 1

        return obs, reward, self.done, info

    def reset(self):

        # Loop forever due to TCPConnectionErrors
        while True:
            try:
                self._reward.reset_reward()
                self.done = False
                if self.video_writer is not None:
                    try:
                        self.video_writer.close()
                    except Exception as e:
                        self.logger.debug(
                            'Error when closing video writer in reset')
                        self.logger.error(e)
                    self.video_writer = None
                if self.benchmark:
                    end_indicator = self._new_episode_benchmark()
                    if end_indicator is False:
                        return False
                else:
                    self._new_episode()
                # Hack: Try sleeping so that the server is ready. Reduces the number of TCPErrors
                time.sleep(4)
                # measurements, sensor_data = self._client.read_data()
                self._client.send_control(VehicleControl())
                measurements, sensor_data = self._client.read_data()
                self._initial_timestamp = measurements.game_timestamp
                self.last_measurements = measurements
                self.last_distance_to_goal = self._get_distance_to_goal(
                    measurements, self._target)
                directions = self._get_directions(
                    measurements.player_measurements.transform, self._target)
                self.last_direction = directions
                obs = self._obs_converter.convert(measurements, sensor_data,
                                                  directions, self._target,
                                                  self.id)
                self.last_obs = obs
                self.done = False
                self._success = False
                self._failure_timeout = False
                self._failure_collision = False
                return obs

            except CameraException:
                self.logger.debug('Camera Exception in reset()')
                continue

            except TCPConnectionError as e:
                self.logger.debug('TCPConnectionError in reset()')
                self.logger.error(e)
                # Disconnect and reconnect
                self.disconnect()
                time.sleep(5)
                self._make_carla_client(self.host, self.port)

    def disconnect(self):

        if self.video_writer is not None:
            try:
                self.video_writer.close()
            except Exception as e:
                self.logger.debug(
                    'Error when closing video writer in disconnect')
                self.logger.error(e)
            self.video_writer = None

        self._client.disconnect()

    def _raster_frame(self, sensor_data, measurements, directions, obs):

        frame = sensor_data['CameraRGB'].data.copy()
        cv2.putText(frame,
                    text='Episode number: {:,}'.format(self.num_episodes - 1),
                    org=(50, 50),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        cv2.putText(frame,
                    text='Environment steps: {:,}'.format(self.steps),
                    org=(50, 80),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)

        REACH_GOAL = 0.0
        GO_STRAIGHT = 5.0
        TURN_RIGHT = 4.0
        TURN_LEFT = 3.0
        LANE_FOLLOW = 2.0
        if np.isclose(directions, REACH_GOAL):
            dir_str = 'REACH GOAL'
        elif np.isclose(directions, GO_STRAIGHT):
            dir_str = 'GO STRAIGHT'
        elif np.isclose(directions, TURN_RIGHT):
            dir_str = 'TURN RIGHT'
        elif np.isclose(directions, TURN_LEFT):
            dir_str = 'TURN LEFT'
        elif np.isclose(directions, LANE_FOLLOW):
            dir_str = 'LANE FOLLOW'
        else:
            raise ValueError(directions)
        cv2.putText(frame,
                    text='Direction: {}'.format(dir_str),
                    org=(50, 110),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        cv2.putText(frame,
                    text='Speed: {:.02f}'.format(
                        measurements.player_measurements.forward_speed * 3.6),
                    org=(50, 140),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        cv2.putText(frame,
                    text='rel_x: {:.02f}, rel_y: {:.02f}'.format(
                        obs['v'][-2].item(), obs['v'][-1].item()),
                    org=(50, 170),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        self.video_writer.writeFrame(frame)

    def _get_distance_to_goal(self, measurements, target):

        current_x = measurements.player_measurements.transform.location.x
        current_y = measurements.player_measurements.transform.location.y
        distance_to_goal = np.linalg.norm(np.array([current_x, current_y]) - \
                            np.array([target.location.x, target.location.y]))
        return distance_to_goal

    def _new_episode(self):
        experiment_idx = np.random.randint(0, len(self._experiments))
        experiment = self._experiments[experiment_idx]
        exp_settings = experiment.conditions
        exp_settings.set(QualityLevel='Low')
        positions = self._client.load_settings(exp_settings).player_start_spots
        idx_pose = np.random.randint(0, len(experiment.poses))
        pose = experiment.poses[idx_pose]
        self.logger.info('Env {} gets experiment {} with pose {}'.format(
            self.id, experiment_idx, idx_pose))
        start_index = pose[0]
        end_index = pose[1]
        self._client.start_episode(start_index)
        self._time_out = self._experiment_suite.calculate_time_out(
            self._get_shortest_path(positions[start_index],
                                    positions[end_index]))
        self._target = positions[end_index]
        self._episode_name = str(experiment.Conditions.WeatherId) + '_' \
                            + str(experiment.task) + '_' + str(start_index) \
                            + '_' + str(end_index)

        if ((self.num_episodes % self.video_every) == 0) and (self.id == 0):
            video_path = os.path.join(
                self.video_dir, '{:08d}_'.format(self.num_episodes) +
                self._episode_name + '.mp4')
            self.logger.info('Writing video at {}'.format(video_path))
            self.video_writer = skvideo.io.FFmpegWriter(
                video_path, inputdict={'-r': '30'}, outputdict={'-r': '30'})
        else:
            self.video_writer = None

        self.num_episodes += 1

    def _new_episode_benchmark(self):
        experiment_idx_past = self.benchmark_index[0]
        pose_idx_past = self.benchmark_index[1]
        repetition_idx_past = self.benchmark_index[2]

        experiment_past = self._experiments[experiment_idx_past]
        poses_past = experiment_past.poses[0:]
        repetition_past = experiment_past.repetitions
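        # benchmark_index walks the suite in (experiment, pose, repetition) order;
        # when the last repetition of the last pose of the last experiment is done,
        # return False to signal that the benchmark has finished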

        if repetition_idx_past == repetition_past:
            if pose_idx_past == len(poses_past) - 1:
                if experiment_idx_past == len(self._experiments) - 1:
                    return False
                else:
                    experiment = self._experiments[experiment_idx_past + 1]
                    pose = experiment.poses[0:][0]
                    self.benchmark_index = [experiment_idx_past + 1, 0, 1]
            else:
                experiment = experiment_past
                pose = poses_past[pose_idx_past + 1]
                self.benchmark_index = [
                    experiment_idx_past, pose_idx_past + 1, 1
                ]
        else:
            experiment = experiment_past
            pose = poses_past[pose_idx_past]
            self.benchmark_index = [
                experiment_idx_past, pose_idx_past, repetition_idx_past + 1
            ]
        exp_settings = experiment.Conditions
        exp_settings.set(QualityLevel='Low')
        positions = self._client.load_settings(exp_settings).player_start_spots
        start_index = pose[0]
        end_index = pose[1]
        self._client.start_episode(start_index)
        self._time_out = self._experiment_suite.calculate_time_out(
            self._get_shortest_path(positions[start_index],
                                    positions[end_index]))
        self._target = positions[end_index]
        self._episode_name = str(experiment.Conditions.WeatherId) + '_' \
                            + str(experiment.task) + '_' + str(start_index) \
                            + '_' + str(end_index)
        if ((self.num_episodes % self.video_every) == 0) and (self.id == 0):
            video_path = os.path.join(
                self.video_dir, '{:08d}_'.format(self.num_episodes) +
                self._episode_name + '.mp4')
            self.logger.info('Writing video at {}'.format(video_path))
            self.video_writer = skvideo.io.FFmpegWriter(
                video_path, inputdict={'-r': '30'}, outputdict={'-r': '30'})
        else:
            self.video_writer = None

        self.num_episodes += 1

    def _get_directions(self, current_point, end_point):

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    @staticmethod
    def _is_collision(measurements):

        c = 0
        c += measurements.player_measurements.collision_vehicles
        c += measurements.player_measurements.collision_pedestrians
        c += measurements.player_measurements.collision_other

        sidewalk_intersection = measurements.player_measurements.intersection_offroad

        otherlane_intersection = measurements.player_measurements.intersection_otherlane
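        # A collision is flagged when any collision intensity is non-zero, more than
        # 1% of the car is on the sidewalk, or more than 90% of it is on the other lane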

        return (c > 1e-9) or (sidewalk_intersection >
                              0.01) or (otherlane_intersection > 0.9), c

    def _make_carla_client(self, host, port):

        while True:
            try:
                self.logger.info(
                    "Trying to make client on port {}".format(port))
                self._client = CarlaClient(host, port, timeout=100)
                self._client.connect()
                self._client.load_settings(CarlaSettings(QualityLevel='Low'))
                self._client.start_episode(0)
                self.logger.info(
                    "Successfully made client on port {}".format(port))
                break
            except TCPConnectionError as error:
                self.logger.debug('Got TCPConnectionError..sleeping for 1')
                self.logger.error(error)
                time.sleep(1)
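
Because CarlaEnv follows the OpenAI Gym step/reset contract, a rollout loop is straightforward. The snippet below is only a sketch: obs_converter and action_converter stand in for the project-specific converter objects expected by the constructor, and action_space.sample() assumes the converter returns a Gym-style action space.

env = CarlaEnv(obs_converter, action_converter, env_id=0, port=2000)
obs = env.reset()
done = False
episode_reward = 0.0
while not done:
    action = env.action_space.sample()  # replace with a trained policy
    obs, reward, done, info = env.step(action)
    episode_reward += reward
env.disconnect()
print('episode reward: %.2f' % episode_reward)
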
Example n. 9
0
class CarlaEnvironment(EnvironmentInterface):
    def __init__(self,
                 experiment_path=None,
                 frame_skip=1,
                 server_height=512,
                 server_width=720,
                 camera_height=88,
                 camera_width=200,
                 experiment_suite=None,
                 quality="low",
                 cameras=[CameraTypes.FRONT],
                 weather_id=[1],
                 episode_max_time=30000,
                 max_speed=35.0,
                 port=2000,
                 map_name="Town01",
                 verbose=True,
                 seed=None,
                 is_rendered=True,
                 num_speedup_steps=30,
                 separate_actions_for_throttle_and_brake=False,
                 rendred_image_type='forward_camera'):

        self.frame_skip = frame_skip  # the frame skip affects the fps of the server directly. fps = 30 / frameskip
        self.server_height = server_height
        self.server_width = server_width
        self.camera_height = camera_height
        self.camera_width = camera_width
        self.experiment_suite = experiment_suite  # an optional CARLA experiment suite to use
        self.quality = quality
        self.cameras = cameras
        self.weather_id = weather_id
        self.episode_max_time = episode_max_time  # milliseconds for each episode
        self.max_speed = max_speed  # km/h
        self.port = port
        self.host = 'localhost'
        self.map_name = map_name
        self.map_path = map_path_mapper[self.map_name]
        self.experiment_path = experiment_path
        self.current_episode_steps_counter = 1
        # client configuration
        self.verbose = verbose
        self.episode_idx = 0
        self.num_speedup_steps = num_speedup_steps
        self.max_speed = max_speed
        self.is_rendered = is_rendered
        # setup server settings
        self.experiment_suite = experiment_suite
        self.separate_actions_for_throttle_and_brake = separate_actions_for_throttle_and_brake
        self.rendred_image_type = rendred_image_type
        self.left_poses = []
        self.right_poses = []
        self.follow_poses = []
        self.Straight_poses = []

        self.settings = CarlaSettings()
        self.settings.set(SynchronousMode=True,
                          SendNonPlayerAgentsInfo=False,
                          NumberOfVehicles=15,
                          NumberOfPedestrians=30,
                          WeatherId=self.weather_id,
                          QualityLevel=self.quality,
                          SeedVehicles=seed,
                          SeedPedestrians=seed)
        if seed is None:
            self.settings.randomize_seeds()

        self.settings = self.add_cameras(self.settings, self.cameras,
                                         self.camera_width, self.camera_height)

        # open the server
        self.server = self.open_server()
        logging.disable(40)
        print("Successfully opened the server")

        # open the client
        self.game = CarlaClient(self.host, self.port, timeout=99999999)
        print("Successfully opened the client")

        self.game.connect()
        print("Successful connection")

        if self.experiment_suite:
            self.current_experiment_idx = 0
            self.current_experiment = self.experiment_suite.get_experiments()[
                self.current_experiment_idx]
            self.scene = self.game.load_settings(
                self.current_experiment.conditions)
        else:
            self.scene = self.game.load_settings(self.settings)

        # get available start positions
        self.positions = self.scene.player_start_spots
        self.num_positions = len(self.positions)
        self.current_start_position_idx = 0
        self.current_pose = 0

        self.action_space = ActionSpace(
            shape=2,
            low=np.array([-1, -1]),
            high=np.array([1, 1]),
            descriptions=["steer", "gas_and_brake"])

        # state space
        # define all measurements
        self.state_space = {
            "measurements": {
                "forward_speed": np.array([1]),
                "x": np.array([1]),
                "y": np.array([1]),
                "z": np.array([1])
            }
        }
        #define all cameras
        for camera in self.scene.sensors:
            self.state_space[camera.name] = {
                "data": np.array([self.camera_height, self.camera_width, 3])
            }
            print("Define ", camera.name, " Camera")

        # measurements
        self.autopilot = None
        self.planner = Planner(self.map_name)

        #rendering
        if self.is_rendered:
            pygame.init()
            pygame.font.init()
            self.display = pygame.display.set_mode(
                (self.camera_width, self.camera_height))

        # env initialization
        self.reset(True)

    def reset(self, force_environment_reset=False):
        """
        Reset the environment and all the variables of the wrapper

        :param force_environment_reset: forces an environment reset even when the game did not end
        :return: a dictionary containing the reward, the next state, the current goal and the game-over flag
        """
        self.reset_ep = True
        self.restart_environment_episode(force_environment_reset)
        self.last_episode_time = time.time()

        if self.current_episode_steps_counter > 0:
            self.episode_idx += 1

        self.done = False
        self.total_reward_in_current_episode = self.reward = 0.0
        self.last_action = 0
        self.current_episode_steps_counter = 0
        self.last_episode_images = []
        self.step([0, 1, 0])

        self.last_env_response = {
            "reward": self.reward,
            "next_state": self.state,
            "goal": self.current_goal,
            "game_over": self.done
        }

        return self.last_env_response

    def add_cameras(self, settings, cameras, camera_width, camera_height):
        # add a front facing camera
        print("Available Cameras are ", cameras)
        if CameraTypes.FRONT in cameras:
            camera = Camera(CameraTypes.FRONT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 0, 0)
            settings.add_sensor(camera)

        # add a left facing camera
        if CameraTypes.LEFT in cameras:
            camera = Camera(CameraTypes.LEFT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, -30, 0)
            settings.add_sensor(camera)

        # add a right facing camera
        if CameraTypes.RIGHT in cameras:
            camera = Camera(CameraTypes.RIGHT.value)
            camera.set(FOV=100)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(2.0, 0, 1.4)
            camera.set_rotation(-15.0, 30, 0)
            settings.add_sensor(camera)

        # add a front facing depth camera
        if CameraTypes.DEPTH in cameras:
            camera = Camera(CameraTypes.DEPTH.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'Depth'
            settings.add_sensor(camera)

        # add a front facing semantic segmentation camera
        if CameraTypes.SEGMENTATION in cameras:
            camera = Camera(CameraTypes.SEGMENTATION.value)
            camera.set_image_size(camera_width, camera_height)
            camera.set_position(0.2, 0, 1.3)
            camera.set_rotation(8, 30, 0)
            camera.PostProcessing = 'SemanticSegmentation'
            settings.add_sensor(camera)
            print("Successfully added a SemanticSegmentation camera")

        return settings

    def get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level commands) needed to reach a certain goal
        """

        directions = self.planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def open_server(self):
        log_path = path.join(
            self.experiment_path if self.experiment_path is not None else '.',
            'logs', "CARLA_LOG_{}.txt".format(self.port))
        if not os.path.exists(os.path.dirname(log_path)):
            os.makedirs(os.path.dirname(log_path))
        with open(log_path, "wb") as out:
            cmd = [
                path.join(environ.get('CARLA_ROOT'),
                          'CarlaUE4.sh'), self.map_path, "-benchmark",
                "-carla-server", "-fps={}".format(30 / self.frame_skip),
                "-world-port={}".format(self.port),
                "-windowed -ResX={} -ResY={}".format(self.server_width,
                                                     self.server_height),
                "-carla-no-hud"
            ]
            print("CMD is : ", cmd)

            # if self.config:
            #     cmd.append("-carla-settings={}".format(self.config))
            p = subprocess.Popen(cmd, stdout=out, stderr=out)

        return p

    def close_server(self):
        os.killpg(os.getpgid(self.server.pid), signal.SIGKILL)

    def step(self, action):
        # get measurements and observations
        measurements = []
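        # measurements starts as a placeholder list; keep reading until the client
        # returns an actual Measurements object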
        while isinstance(measurements, list):
            measurements, sensor_data = self.game.read_data()
        self.state = {}

        for camera in self.scene.sensors:
            if camera.name == 'segmentation':
                # labels_to_road_noroad takes a Sensor.Image, not a numpy array
                self.state[camera.name] = labels_to_road_noroad(
                    sensor_data[camera.name])
            else:
                self.state[camera.name] = sensor_data[camera.name].data

        self.location = [
            measurements.player_measurements.transform.location.x,
            measurements.player_measurements.transform.location.y,
            measurements.player_measurements.transform.location.z
        ]

        self.distance_from_goal = np.linalg.norm(
            np.array(self.location[:2]) -
            [self.current_goal.location.x, self.current_goal.location.y])

        is_collision = measurements.player_measurements.collision_vehicles != 0 \
                       or measurements.player_measurements.collision_pedestrians != 0 \
                       or measurements.player_measurements.collision_other != 0

        speed_reward = measurements.player_measurements.forward_speed - 1
        if speed_reward > 30.:
            speed_reward = 30.
        self.reward = speed_reward \
                      - (measurements.player_measurements.intersection_otherlane * 5) \
                      - (measurements.player_measurements.intersection_offroad * 5) \
                      - is_collision * 100 \
                      - np.abs(self.control.steer) * 10

        # update measurements
        #self.measurements = [measurements.player_measurements.forward_speed] + self.location
        #TODO, Add control signals to measurements
        control_signals = [
            np.clip(action[0], -1, 1),
            np.clip(action[1], 0, 1),
            np.clip(action[2], 0, 1)
        ]  #steer, throttle, brake
        self.measurements = [measurements.player_measurements.forward_speed
                             ] + self.location + control_signals

        self.autopilot = measurements.player_measurements.autopilot_control

        # The directions to reach the goal (0 Follow lane, 1 Left, 2 Right, 3 Straight)
        directions = int(
            self.get_directions(measurements.player_measurements.transform,
                                self.current_goal) - 2)

        # if directions == 0:
        #     if self.reset_ep:
        #         self.follow_poses.append((self.current_start,self.current_goal))
        #         self.reset_ep = False
        #     elif directions == 1:
        #         self.left_poses.append((self.current_start,self.current_goal))
        #     elif directions == 2:
        #         self.right_poses.append((self.current_start,self.current_goal))

        # elif directions == 1:
        #     if self.reset_ep:
        #         self.left_poses.append((self.current_start,self.current_goal))
        #         self.reset_ep = False
        # elif directions == 2:
        #     if self.reset_ep:
        #         self.right_poses.append((self.current_start,self.current_goal))
        #         self.reset_ep = False
        # elif directions == 3:
        #     if self.reset_ep:
        #         self.Straight_poses.append((self.current_start,self.current_goal))
        #         self.reset_ep = False
        # else:
        #     self.reset_ep = False

        self.state['high_level_command'] = directions

        if (measurements.game_timestamp >=
                self.episode_max_time) or is_collision:
            self.done = True

        self.state['measurements'] = np.array(self.measurements)

        #prepare rendered image and update display
        if self.is_rendered:
            #check if user wants to close rendering, else continue rendering
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    self.is_rendered = False

            self.surface = pygame.surfarray.make_surface(
                self.state[self.rendred_image_type].swapaxes(0, 1))
            self.display.blit(self.surface, (0, 0))
            pygame.display.flip()

        self.take_action(action)

    def take_action(self, action):
        self.control = VehicleControl()

        # map the action values into a VehicleControl (steer, throttle, brake)
        self.control.steer = np.clip(action[0], -1, 1)
        self.control.throttle = np.clip(action[1], 0, 1)
        self.control.brake = np.abs(np.clip(action[2], 0, 1))

        # prevent braking
        # if not self.allow_braking or self.control.brake < 0.1 or self.control.throttle > self.control.brake:
        #     self.control.brake = 0

        # prevent over speeding
        if hasattr(self, 'measurements') and self.measurements[
                0] * 3.6 > self.max_speed and self.control.brake == 0:
            self.control.throttle = 0.0

        self.control.hand_brake = False
        self.control.reverse = False

        self.game.send_control(self.control)

    def load_experiment(self, experiment_idx):
        print("Loading the experiment")
        self.current_experiment = self.experiment_suite.get_experiments(
        )[experiment_idx]
        self.scene = self.game.load_settings(
            self.current_experiment.conditions)
        self.positions = self.scene.player_start_spots
        self.num_positions = len(self.positions)
        self.current_start_position_idx = 0
        self.current_pose = 0

    def restart_environment_episode(self, force_environment_reset=False):
        # select start and end positions
        print("Restarting a new episode")
        if self.experiment_suite:
            # if an experiment suite is available, follow its given poses
            if self.current_pose >= len(self.current_experiment.poses):
                # load a new experiment
                self.current_experiment_idx = (
                    self.current_experiment_idx + 1) % len(
                        self.experiment_suite.get_experiments())
                self.load_experiment(self.current_experiment_idx)

            self.current_start_position_idx = self.current_experiment.poses[
                self.current_pose][0]
            self.current_start = self.positions[self.current_experiment.poses[
                self.current_pose][0]]
            self.current_goal = self.positions[self.current_experiment.poses[
                self.current_pose][1]]
            self.current_pose += 1
        else:
            # go over all the possible positions in a cyclic manner
            self.current_start_position_idx = (
                self.current_start_position_idx + 1) % self.num_positions
            self.current_start = self.positions[
                self.current_start_position_idx]
            # choose a random goal destination
            self.current_goal = random.choice(self.positions)

        try:
            self.game.start_episode(self.current_start_position_idx)
        except Exception:
            self.game.connect()
            self.game.start_episode(self.current_start_position_idx)

        # start the game with some initial speed
        for i in range(self.num_speedup_steps):
            self.control = VehicleControl(throttle=1.0,
                                          brake=0,
                                          steer=0,
                                          hand_brake=False,
                                          reverse=False)
            # send the full-throttle control so the car actually gains some initial speed
            self.game.send_control(self.control)

    def get_rendered_image(self) -> np.ndarray:
        """
        Return a numpy array containing the image that will be rendered to the screen.
        This can be different from the observation. For example, mujoco's observation is a measurements vector.
        :return: numpy array containing the image that will be rendered to the screen
        """
        image = [self.state[camera.name] for camera in self.scene.sensors]
        image = np.vstack(image)
        return image
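
Unlike a Gym environment, CarlaEnvironment.step() returns nothing and instead updates self.state, self.reward and self.done in place, while reset() returns a response dictionary. A hypothetical driving loop, assuming CARLA_ROOT is set and the project-specific CameraTypes, Planner and map_path_mapper helpers are importable, could look like this:

env = CarlaEnvironment(port=2000, map_name='Town01', is_rendered=False)
response = env.reset()
while not env.done:
    # action is [steer, throttle, brake]
    env.step([0.0, 0.5, 0.0])
    print('speed %.2f m/s, reward %.2f, high level command %d'
          % (env.measurements[0], env.reward, env.state['high_level_command']))
env.close_server()
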
Example n. 10
0
class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with a given Suite.


    The benchmark class must be inherited by a class that defines
    all the experiments to be run by the agent.
    """

    def __init__(
            self,
            city_name='Town01',
            name_to_save='Test',
            continue_experiment=False,
            save_images=False,
            distance_for_success=2.0
    ):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance for arriving at the goal point in
        # order to consider it a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images
                                    )

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite with the poses and conditions to run
            agent: an agent object with the run_step method implemented.
            client: the carla client used to communicate with the simulator


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the just ran episode
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level commands) needed to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x,
             current_point.location.y, 0.22),
            (current_point.orientation.x,
             current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z])

    def _run_navigation_episode(
            self,
            agent,
            client,
            time_out,
            target,
            episode_name):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        while (current_timestamp - initial_timestamp) < (time_out * 1000) and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(measurements.player_measurements.transform, target)
            # Agent process the data.
            control = agent.run_step(measurements, sensor_data, directions, target)
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ',
                         control.steer, control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info(
                '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                float(distance), current_x, current_y, target.location.x,
                target.location.y)
            # Check if the target was reached
            if distance < self._distance_for_success:
                success = True

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance
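
Running this benchmark requires a connected CARLA 0.8-style client, an experiment suite and an agent whose run_step() returns a VehicleControl. The following is a minimal sketch; experiment_suite and agent are placeholders for whichever suite and agent implementation are actually used:

client = CarlaClient('127.0.0.1', 2000, timeout=30)
client.connect()
benchmark = DrivingBenchmark(city_name='Town01', name_to_save='test_run')
# experiment_suite provides the poses, conditions and time-out calculation;
# agent implements run_step(measurements, sensor_data, directions, target).
results = benchmark.benchmark_agent(experiment_suite, agent, client)
print('Summary metrics:', results)
print('Logs written to', benchmark.get_path())
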
Example n. 11
0
class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with a given Suite.


    The benchmark class must be inherited by a class that defines
    all the experiments to be run by the agent.
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):
        """
        Args:
            city_name:
            name_to_save:
            continue_experiment:
            save_images:
            distance_for_success:
            collisions_as_failure: if this flag is set to True, episodes terminate as failures when the car collides.
        """

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance for arriving at the goal point in
        # order to consider it a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        # To keep track of the previous collisions
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite: the experiment suite with the poses and conditions to run
            agent: an agent object with the run_step method implemented.
            client: the carla client used to communicate with the simulator


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots
            t = len(positions)
            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, col_ped, col_veh, col_oth,metList) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), experiment_suite.metrics_parameters,
                            experiment_suite.collision_as_failure)

                    # Write the general status of the episode that just ran.
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result,
                        col_ped, col_veh, col_oth)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec,
                        metList)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the planner's high-level command (directions) to reach a certain goal.
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path distance between two points considering the road network.
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _has_agent_collided(self, measurement, metrics_parameters):
        """
            This function keeps internal state (the previous collision readings) and only looks at one measurement at a time.
        """
        collided_veh = 0
        collided_ped = 0
        collided_oth = 0

        if (measurement.collision_vehicles - self._previous_vehicle_collision) \
                > metrics_parameters['collision_vehicles']['threshold']/2.0:
            collided_veh = 1
        if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \
                > metrics_parameters['collision_pedestrians']['threshold']/2.0:
            collided_ped = 1
        if (measurement.collision_other - self._previous_other_collision) \
                > metrics_parameters['collision_other']['threshold']/2.0:
            collided_oth = 1

        self._previous_pedestrian_collision = measurement.collision_pedestrians
        self._previous_vehicle_collision = measurement.collision_vehicles
        self._previous_other_collision = measurement.collision_other

        return collided_ped, collided_veh, collided_oth
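    # Worked example (editor's note; numbers are illustrative assumptions): if
    # metrics_parameters['collision_vehicles']['threshold'] were 400 and the
    # cumulative collision_vehicles reading jumped from 1200 to 1700 between two
    # consecutive measurements, the increase (500) exceeds 400 / 2.0 = 200, so
    # collided_veh is flagged for this frame.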

    def _is_agent_stuck(self, measurements, stuck_vec, old_coll):
        # Break the episode when the agent has been stuck on a static object for the whole window.
        coll_other = measurements.collision_other
        coll_other -= old_coll
        otherlane = measurements.intersection_otherlane > 0.4
        offroad = measurements.intersection_offroad > 0.3
        logging.info(
            "offroad: {}, otherlane: {}, coll_other: {}, old_coll: {}".format(
                offroad, otherlane, coll_other, old_coll))

        # if still driving or got unstuck (v > 4km/h)
        if measurements.forward_speed * 3.6 > 4:
            cycle_signal(stuck_vec, 0)
            if coll_other:
                old_coll += coll_other
        elif offroad or otherlane or coll_other:
            cycle_signal(stuck_vec, 1)
        else:
            cycle_signal(stuck_vec, 0)

        return all(stuck_vec), stuck_vec, old_coll
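    # Editor's sketch: `cycle_signal` is not defined in this snippet. From its
    # use above it presumably acts as a fixed-length shift register over the
    # 60-frame window, so `all(stuck_vec)` only fires after ~6 seconds in which
    # every frame was flagged. A minimal stand-in under that assumption:
    #
    #     def cycle_signal(signal_vec, value):
    #         signal_vec.pop(0)          # drop the oldest frame flag
    #         signal_vec.append(value)   # append the newest flag in place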

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, metrics_parameters,
                                collision_as_failure):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
            metrics_object: The metrics object to check for collisions

        """

        # Send an initial command.
        time.sleep(2)
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        ### Reset CAL agent
        agent.reset_state()

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        col_ped, col_veh, col_oth = 0, 0, 0
        fail = False
        success = False

        ### own metrics
        stuck_vec = [0] * 60  # measure for 60 frames (6 seconds)
        center_distance_vec = []
        old_collision_value = 0
        direction_vec = []

        # Additional per-frame metadata reported by the agent (via agent.getMetData()).
        metList = []

        while not fail and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # Agent process the data.
            curr_time = time.time()
            control, prediction = agent.run_step(measurements, sensor_data,
                                                 directions, target)
            # Override the agent's steering with the autopilot's steering signal.
            control.steer = measurements.player_measurements.autopilot_control.steer
            curr_time = time.time() - curr_time

            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now

            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Distance to target: %f', float(distance))

            # Check whether the agent has collided since the last measurement.
            col_ped, col_veh, col_oth = self._has_agent_collided(
                measurements.player_measurements, metrics_parameters)

            ### Change relative to the original benchmark code: custom stuck detection ###
            is_stuck, stuck_vec, old_collision_value = self._is_agent_stuck(
                measurements.player_measurements, stuck_vec,
                old_collision_value)

            if distance < self._distance_for_success:
                success = True
            elif (current_timestamp - initial_timestamp) > (time_out * 1000):
                fail = True
            elif is_stuck:
                fail = True
            elif collision_as_failure and (col_ped or col_veh or col_oth):
                fail = True

            time_remain = (time_out * 1000 -
                           (current_timestamp - initial_timestamp)) / 1000
            logging.info('Time remaining: %i m %i s', time_remain / 60,
                         time_remain % 60)
            logging.info('')

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)
            temp = agent.getMetData()
            temp['game_timestamp'] = measurements.game_timestamp
            metList.append(temp)
            print("stp number: ", frame, " ", temp, " elapsed time: ",
                  curr_time)

            # Custom logging: record ground-truth scene signals next to the agent's predictions.
            player = {}
            peds = []
            px = measurements.player_measurements.transform.location.x
            py = measurements.player_measurements.transform.location.y

            player['pos_x'] = px
            player['pos_y'] = py

            yaw = measurements.player_measurements.transform.rotation.yaw
            yaw = math.radians(yaw)
            # Per-frame CSV logs, opened in append mode and closed at the end of the frame.
            sfile = open(
                '/home/self-driving/Desktop/CARLA_0.8.2/Indranil/CAL-master_copy_new/python_client/_benchmarks_results/sigData.csv',
                'a+')
            pfile = open(
                '/home/self-driving/Desktop/CARLA_0.8.2/Indranil/CAL-master_copy_new/python_client/_benchmarks_results/pData.csv',
                'a+')

            hazard = False

            sig = False
            sigState = ''

            speed_post = False
            speed_post_state = ''

            veh_inside_box = False
            veh_dist = 50.0
            '''
            prediction keys:
            center_distance
            hazard_stop
            red_light
            relative_angle
            speed_sign
            veh_distance 
            '''
            front_axel_x = 2.33

            for a in measurements.non_player_agents:

                if a.HasField('traffic_light'):

                    llx, lly = a.traffic_light.transform.location.x, a.traffic_light.transform.location.y

                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)

                    if self.inside_a2(nx, ny):
                        print("\n--traffic light found---!!")
                        print("State :", a.traffic_light.state)
                        sig = True
                        sigState = str(a.traffic_light.state)

                if a.HasField('speed_limit_sign'):
                    llx, lly = a.speed_limit_sign.transform.location.x, a.speed_limit_sign.transform.location.y
                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)
                    if self.inside_a2(nx, ny):
                        print("\n--Speed Sign found---!!")
                        print("Limit :", a.speed_limit_sign.speed_limit * 3.6)
                        speed_post = True
                        speed_post_state = str(
                            math.floor(a.speed_limit_sign.speed_limit * 3.6))

                if a.HasField('pedestrian'):
                    llx, lly = a.pedestrian.transform.location.x, a.pedestrian.transform.location.y
                    ped = {}
                    ped['pos_x'] = llx
                    ped['pos_y'] = lly
                    peds.append(ped)
                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)
                    if self.inside_a1(nx, ny):
                        print("\n--Pedestrian Hazard---!!")
                        print("Pedestrian in front ")
                        hazard = True

                if a.HasField('vehicle'):
                    llx, lly = a.vehicle.transform.location.x, a.vehicle.transform.location.y
                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)
                    hlen = a.vehicle.bounding_box.extent.x
                    if self.inside_a3(nx, ny):
                        distan = self.getdis(0 + front_axel_x, 0, nx,
                                             ny - hlen)
                        veh_inside_box = True
                        veh_dist = min(distan, veh_dist)
                        print("Vehicle in front distance,", distan,
                              " Predicted ", prediction['veh_distance'])

            player['peds'] = peds

            #write to file if hazard stop is true in current frame...
            if hazard is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Hazard," + str(hazard) +
                    "," + str(prediction['hazard_stop'][0]) + "," +
                    str(prediction['hazard_stop'][1]) + "\n")
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Hazard," + str(False) +
                    "," + str(prediction['hazard_stop'][0]) + "," +
                    str(prediction['hazard_stop'][1]) + "\n")
            if sig is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + "," + "Traffic," +
                    sigState + "," + str(prediction['red_light'][0]) + "," +
                    str(prediction['red_light'][1]) + "\n")
                print("Traffic light.. Actual:", a.traffic_light.state,
                      " Predicted", prediction['red_light'][0])
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + "," + "Traffic," + '0' +
                    "," + str(prediction['red_light'][0]) + "," +
                    str(prediction['red_light'][1]) + "\n")
            if speed_post is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",SpeedSign," +
                    speed_post_state + "," + str(prediction['speed_sign'][0]) +
                    "," + str(prediction['speed_sign'][1]) + "\n")
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",SpeedSign," + '-1' + "," +
                    str(prediction['speed_sign'][0]) + "," +
                    str(prediction['speed_sign'][1]) + "\n")

            if veh_inside_box is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Vehicle," +
                    str(veh_dist) + "," + str(prediction['veh_distance']) +
                    "\n")
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Vehicle," + str(50.0) +
                    "," + str(prediction['veh_distance']) + "\n")

            sfile.write(
                str('{:0>6d}'.format(frame)) + ",CenterDist," +
                str(temp['centerDist']) + "," +
                str(prediction['center_distance']) + "\n")

            sfile.write(
                str('{:0>6d}'.format(frame)) + ",Angle," + str(yaw) + "," +
                str(prediction['relative_angle']) + "\n")

            jout = json.dumps(player)
            pfile.write(jout + "\n")

            # Close the per-frame log files so file handles do not accumulate
            # across frames (they are reopened in append mode next iteration).
            sfile.close()
            pfile.close()

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp
            ) / 1000.0, distance, col_ped, col_veh, col_oth, metList
        return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, metList

    def getNewCord(self, origin, point, psi):
        # Transform a world-frame point into the ego-vehicle frame: solve the
        # rotation system R * x = (point - origin), where psi is the vehicle yaw.
        a = np.array([[math.cos(psi), -math.sin(psi)],
                      [math.sin(psi), math.cos(psi)]])
        b = np.array([(point[0] - origin[0]), (point[1] - origin[1])])
        x = np.linalg.solve(a, b)
        return x
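    # Usage note (editor's example): a world offset of (10, 2) from the vehicle
    # maps to ego coordinates (10, 2) when psi = 0 and to approximately (2, -10)
    # when psi = pi/2; these ego-frame coordinates are what the inside_a1/a2/a3
    # boxes below test against.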

    def getdis(self, x1, y1, x2, y2):
        return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)

    def inside_a2(self, x, y):
        # Detection box (ego frame) used for traffic lights and speed signs:
        # between 7.4 and 14.0 ahead and 0.8 to 5.8 to the side of the vehicle.
        front_axel_x = 0
        if (x >= (7.4 + front_axel_x) and x <=
            (14.0 + front_axel_x)) and (y >= 0.8 and y <= 5.8):
            return True
        return False

    def inside_a1(self, x, y):
        # Pedestrian-hazard box: up to 8.2 ahead of the front axle and within
        # 2.0 laterally on either side.
        front_axel_x = 2.33
        if (x >= (0.0 + front_axel_x) and x <=
            (8.2 + front_axel_x)) and (y >= -2.0 and y <= 2.0):
            return True
        return False

    def inside_a3(self, x, y):
        # Lead-vehicle corridor: up to 50 ahead of the front axle and within
        # 1.6 laterally on either side.
        front_axel_x = 2.33
        if (x >= (0 + front_axel_x) and x <=
            (50 + front_axel_x)) and (y >= -1.6 and y <= 1.6):
            return True
        return False
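    # Usage sketch (editor's note; the surrounding objects are assumptions, not
    # part of this snippet): with a running CARLA 0.8.x server, an experiment
    # suite and a CAL-style agent, the benchmark above is typically driven as:
    #
    #     with make_carla_client(host, port) as client:
    #         benchmark = DrivingBenchmark(city_name='Town01', name_to_save='CAL_test')
    #         results = benchmark.benchmark_agent(experiment_suite, agent, client)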
Example no. 12
0
class StraightDriveEnv(CarlaEnv):
    def __init__(self,
                 client,
                 frame_skip=1,
                 cam_width=800,
                 cam_height=600,
                 town_string='Town01'):
        super(StraightDriveEnv, self).__init__()
        self.frame_skip = frame_skip
        self.client = client
        self._planner = Planner(town_string)

        camera0 = Camera('CameraRGB')
        camera0.set(CameraFOV=100)
        camera0.set_image_size(cam_height, cam_width)
        camera0.set_position(200, 0, 140)
        camera0.set_rotation(-15.0, 0, 0)

        self.start_goal_pairs = [[36, 40], [39, 35], [110, 114], [7, 3],
                                 [0, 4], [68, 50], [61, 59], [47, 64],
                                 [147, 90], [33, 87], [26, 19], [80, 76],
                                 [45, 49], [55, 44], [29, 107], [95, 104],
                                 [84, 34], [53, 67], [22, 17], [91, 148],
                                 [20, 107], [78, 70], [95, 102], [68, 44],
                                 [45, 69]]

        vehicles = 0
        pedestrians = 0
        weather = 1

        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=vehicles,
            NumberOfPedestrians=pedestrians,
            WeatherId=weather,
        )

        settings.randomize_seeds()
        settings.add_sensor(camera0)

        self.scene = self.client.load_settings(settings)

        img_shape = (cam_width, cam_height, 3)
        self.observation_space = spaces.Tuple(
            (spaces.Box(-np.inf, np.inf, (3, )), spaces.Box(0, 255,
                                                            img_shape)))
        self.action_space = spaces.Box(-np.inf, np.inf, shape=(3, ))

        self.prev_state = np.array([0., 0., 0.])
        self.prev_collisions = np.array([0., 0., 0.])
        self.prev_intersections = np.array([0., 0.])

    def step(self, action):
        steer = np.clip(action[0], -1, 1)
        throttle = np.clip(action[1], 0, 1)
        brake = np.clip(action[2], 0, 1)
        for i in range(self.frame_skip):
            self.t += 1
            self.client.send_control(steer=steer,
                                     throttle=throttle,
                                     brake=brake,
                                     hand_brake=False,
                                     reverse=False)

        measurements, sensor_data = self.client.read_data()
        sensor_data = self._process_image(sensor_data['CameraRGB'])
        current_time = measurements.game_timestamp

        state, collisions, intersections, onehot = self._process_measurements(
            measurements)
        reward, dist_goal = self._calculate_reward(state, collisions,
                                                   intersections)
        done = self._calculate_done(collisions, state, current_time)

        self.prev_state = np.array(state)
        self.prev_collisions = np.array(collisions)
        self.prev_intersections = np.array(intersections)

        measurement_obs = self._generate_obs(state, collisions, onehot,
                                             dist_goal)

        return (measurement_obs, sensor_data), reward, done, {}

    def reset(self):
        self._generate_start_goal_pair()

        print('Starting new episode...')
        # Blocking function until episode is ready
        self.client.start_episode(self.start_idx)

        measurements, sensor_data = self.client.read_data()
        sensor_data = self._process_image(sensor_data['CameraRGB'])

        state, collisions, intersections, onehot = self._process_measurements(
            measurements)

        self.prev_state = np.array(state)
        self.prev_collisions = np.array(collisions)
        self.prev_intersections = np.array(intersections)

        self.start_time = measurements.game_timestamp
        self.t = 0

        pos = np.array(state[0:2])
        dist_goal = np.linalg.norm(pos - self.goal)

        measurement_obs = self._generate_obs(state, collisions, onehot,
                                             dist_goal)

        return (measurement_obs, sensor_data)

    def _calculate_done(self, collisions, state, current_time):
        pos = np.array(state[0:2])
        dist_goal = np.linalg.norm(pos - self.goal)

        return self._is_timed_out(current_time) or self._is_goal(dist_goal)

        # Collision-based termination is not described in the paper, but would be
        # needed for safe driving, e.g.:
        #     return self._is_timed_out(current_time) or self._collision_on_step(collisions)

    def _calculate_planner_onehot(self, measurements):
        # return np.array([0, 0, 0, 0, 1]) # TODO need to debug
        print(type(self.end_point.location), type(self.end_point.orientation))
        val = self._planner.get_next_command(measurements.location,
                                             measurements.orientation,
                                             self.end_point.location,
                                             self.end_point.orientation)

        if val == 0.0:
            return np.array([1, 0, 0, 0, 0])

        onehot = np.zeros(5)
        val = int(val) - 1
        onehot[val] = 1

        return onehot
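    # Editor's note (assumption based on the CARLA 0.8.x planner): get_next_command
    # typically returns REACH_GOAL = 0.0, LANE_FOLLOW = 2.0, TURN_LEFT = 3.0,
    # TURN_RIGHT = 4.0, GO_STRAIGHT = 5.0, so the early return handles REACH_GOAL
    # (index 0) and `int(val) - 1` maps the remaining commands to indices 1..4.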

    def _calculate_reward(self, state, collisions, intersections):
        pos = np.array(state[0:2])
        dist_goal = np.linalg.norm(pos - self.goal)
        dist_goal_prev = np.linalg.norm(self.prev_state[0:2] - self.goal)

        speed = state[2]

        # TODO: Check this?
        r = 1000 * (dist_goal_prev - dist_goal) / 10 \
            + 0.05 * (speed - self.prev_state[2]) \
            - 2 * (sum(intersections) - sum(self.prev_intersections))
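        # Worked example (editor's note): if the agent moved 1.0 unit closer to
        # the goal, sped up by 2.0 and accumulated no new lane/offroad fraction,
        # the reward is 1000 * 1.0 / 10 + 0.05 * 2.0 - 2 * 0 = 100.1.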

        return r, dist_goal

    def _calculate_timeout(self):
        self.timeout_t = (
            (self.timeout_dist / 100000.0) / 10.0) * 3600.0 + 10.0
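        # Editor's note: assuming the planner distance is returned in centimetres,
        # this is distance[km] / (10 km/h) in hours, times 3600 s/h, plus a 10 s
        # slack, i.e. the time budget to drive the route at an average of 10 km/h.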

    def _collision_on_step(self, collisions):
        return sum(collisions) > 0

    def _generate_obs(self, state, collisions, onehot, dist_goal):
        speed = state[2]
        collisions = np.sum(collisions)

        return np.concatenate((np.array([speed, dist_goal,
                                         collisions]), np.array(onehot)))

    def _generate_start_goal_pair(self):
        # Choose one player start at random.
        # np.random.randint's upper bound is exclusive, so pass len() directly
        # to make every start/goal pair selectable.
        self.position_index = np.random.randint(0, len(self.start_goal_pairs))
        self.start_idx = self.start_goal_pairs[self.position_index][0]
        self.goal_idx = self.start_goal_pairs[self.position_index][1]

        start_point = self.scene.player_start_spots[self.start_idx]
        end_point = self.scene.player_start_spots[self.goal_idx]
        self.end_point = end_point

        self.goal = [end_point.location.x / 100,
                     end_point.location.y / 100]  # cm -> m

        self.timeout_dist = self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 22],
            [start_point.orientation.x, start_point.orientation.y, 22],
            [end_point.location.x, end_point.location.y, 22],
            [end_point.orientation.x, end_point.orientation.y, 22])

        self._calculate_timeout()

    def _is_goal(self, distance):
        return distance < 2.0

    def _is_timed_out(self, current_time):
        return (current_time - self.start_time) > (self.timeout_t * 1000)

    def _process_image(self, carla_raw_img):
        return resize(to_rgb_array(carla_raw_img), (84, 84))

    def _process_measurements(self, measurements):
        player_measurements = measurements.player_measurements

        pos_x = player_measurements.transform.location.x / 100  # cm -> m
        pos_y = player_measurements.transform.location.y / 100
        speed = player_measurements.forward_speed

        col_cars = player_measurements.collision_vehicles
        col_ped = player_measurements.collision_pedestrians
        col_other = player_measurements.collision_other

        other_lane = player_measurements.intersection_otherlane
        offroad = player_measurements.intersection_offroad

        onehot = self._calculate_planner_onehot(player_measurements.transform)

        return np.array([pos_x, pos_y,
                         speed]), np.array([col_cars, col_ped, col_other
                                            ]), np.array([other_lane,
                                                          offroad]), onehot
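    # Usage sketch (editor's note; `policy` is a hypothetical callable, not part
    # of this snippet): the environment follows the classic gym API, so a rollout
    # looks roughly like:
    #
    #     env = StraightDriveEnv(client)
    #     obs = env.reset()
    #     done = False
    #     while not done:
    #         obs, reward, done, info = env.step(policy(obs))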
Example no. 13
0
class CarlaHuman(Driver):
    def __init__(self, driver_conf):
        Driver.__init__(self)
        # some behaviors
        self._autopilot = driver_conf.autopilot
        self._reset_period = driver_conf.reset_period  # the reset period is measured in wall-clock (system) time, not simulation time
        self._goal_reaching_threshold = 3
        self.use_planner = driver_conf.use_planner
        # we need the planner to find a valid episode, so we initialize one no matter what

        self._world = None
        self._vehicle = None
        self._agent_autopilot = None
        self._camera_center = None
        self._spectator = None
        # (last) images store for several cameras
        self._data_buffers = dict()
        self.update_once = False
        self._collision_events = []
        self.collision_sensor = None

        if __CARLA_VERSION__ == '0.8.X':
            self.planner = Planner(driver_conf.city_name)
        else:
            self.planner = None
            self.use_planner = False

        # resources
        self._host = driver_conf.host
        self._port = driver_conf.port

        # various config files
        self._driver_conf = driver_conf
        self._config_path = driver_conf.carla_config

        # some initializations
        self._straight_button = False
        self._left_button = False
        self._right_button = False

        self._rear = False
        self._recording = False
        self._skiped_frames = 20
        self._stucked_counter = 0

        self._prev_time = datetime.now()
        self._episode_t0 = datetime.now()

        self._vehicle_prev_location = namedtuple("vehicle", "x y z")
        self._vehicle_prev_location.x = 0.0
        self._vehicle_prev_location.y = 0.0
        self._vehicle_prev_location.z = 0.0

        self._camera_left = None
        self._camera_right = None
        self._camera_center = None

        self._actor_list = []

        self._sensor_list = []
        self._weather_list = [
            'ClearNoon', 'CloudyNoon', 'WetNoon', 'WetCloudyNoon',
            'MidRainyNoon', 'HardRainNoon', 'SoftRainNoon', 'ClearSunset',
            'CloudySunset', 'WetSunset', 'WetCloudySunset', 'MidRainSunset',
            'HardRainSunset', 'SoftRainSunset'
        ]

        self._current_weather = 4

        self._current_command = 2.0

        # steering wheel
        self._steering_wheel_flag = True

        if self._steering_wheel_flag:
            self._is_on_reverse = False
            self._control = VehicleControl()
            self._parser = SafeConfigParser()
            self._parser.read('wheel_config.ini')
            self._steer_idx = int(
                self._parser.get('G29 Racing Wheel', 'steering_wheel'))
            self._throttle_idx = int(
                self._parser.get('G29 Racing Wheel', 'throttle'))
            self._brake_idx = int(self._parser.get('G29 Racing Wheel',
                                                   'brake'))
            self._reverse_idx = int(
                self._parser.get('G29 Racing Wheel', 'reverse'))
            self._handbrake_idx = int(
                self._parser.get('G29 Racing Wheel', 'handbrake'))
        # Use a plain function object as a lightweight mutable namespace for the
        # latest timestamp fields (elapsed_seconds / delta_seconds).
        self.last_timestamp = lambda x: x
        self.last_timestamp.elapsed_seconds = 0.0
        self.last_timestamp.delta_seconds = 0.2

        self.initialize_map(driver_conf.city_name)

    def start(self):
        if __CARLA_VERSION__ == '0.8.X':
            self.carla = CarlaClient(self._host, int(self._port), timeout=120)
            self.carla.connect()
        else:
            self.carla = CarlaClient(self._host, int(self._port))
            self.carla.set_timeout(5000)
            wd = self.carla.get_world()
            self.wd = wd
            settings = wd.get_settings()
            settings.synchronous_mode = True
            wd.apply_settings(settings)

        self._reset()

        if not self._autopilot:
            pygame.joystick.init()

            joystick_count = pygame.joystick.get_count()
            if joystick_count > 1:
                print("Please Connect Just One Joystick")
                raise ValueError()

            self.joystick = pygame.joystick.Joystick(0)
            self.joystick.init()

    def test_alive(self):
        if not hasattr(self, "wd"):
            return False
        wd = self.wd
        wd.tick()
        try:
            wd.wait_for_tick(5.0)
        except Exception:
            return False
        return True

    def __del__(self):
        if hasattr(self, 'carla'):
            print("destructing the connection")
            if __CARLA_VERSION__ == '0.8.X':
                self.carla.disconnect()
            else:
                alive = self.test_alive()
                # destroy old actors
                print('destroying actors')
                if alive:
                    if len(self._actor_list) > 0:
                        for actor in self._actor_list:
                            actor.destroy()
                self._actor_list = []
                print('done.')

                if self._vehicle is not None:
                    if alive:
                        self._vehicle.destroy()
                    self._vehicle = None
                if self._camera_center is not None:
                    if alive:
                        self._camera_center.destroy()
                    self._camera_center = None
                if self._camera_left is not None:
                    if alive:
                        self._camera_left.destroy()
                    self._camera_left = None
                if self._camera_right is not None:
                    if alive:
                        self._camera_right.destroy()
                    self._camera_right = None
                if self.collision_sensor is not None:
                    if alive:
                        self.collision_sensor.sensor.destroy()
                    self.collision_sensor = None

                    #  pygame.quit()
            # if self._camera is not None:
            #     self._camera.destroy()
            #     self._camera = None
            # if self._vehicle is not None:
            #     self._vehicle.destroy()
            #     self._vehicle = None

    def try_spawn_random_vehicle_at(self,
                                    blueprints,
                                    transform,
                                    auto_drive=True):
        blueprint = random.choice(blueprints)
        if blueprint.has_attribute('color'):
            color = random.choice(
                blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        vehicle = self._world.try_spawn_actor(blueprint, transform)
        if vehicle is not None:
            self._actor_list.append(vehicle)
            if auto_drive:
                # TODO: this won't work in 0.9.5 with Exp_Town
                # however, we don't have traffic in that town right now, so we just ignore this
                vehicle.set_autopilot()
            #print('spawned %r at %s' % (vehicle.type_id, transform.location))
            return True
        return False

    def get_parking_locations(self,
                              filename,
                              z_default=0.0,
                              random_perturb=False):
        with open(filename, "r") as f:
            lines = f.readlines()
            ans = []
            for line in lines:
                x, y, yaw = [float(v.strip()) for v in line.split(",")]
                if random_perturb:
                    x += np.random.normal(
                        0, scale=self._driver_conf.extra_explore_location_std)
                    y += np.random.normal(
                        0, scale=self._driver_conf.extra_explore_location_std)
                    yaw += np.random.normal(
                        0, scale=self._driver_conf.extra_explore_yaw_std)

                ans.append(
                    carla.Transform(location=carla.Location(x=x,
                                                            y=y,
                                                            z=z_default),
                                    rotation=carla.Rotation(roll=0,
                                                            pitch=0,
                                                            yaw=yaw)))
        return ans
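    # Expected file format (editor's note, inferred from the parser above): one
    # parking spot per line as "x, y, yaw", for example:
    #
    #     102.5, -34.1, 90.0
    #     110.0, -34.1, 90.0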

    def print_transform(self, t):
        print(t.location.x, t.location.y, t.location.z)
        print(t.rotation.roll, t.rotation.pitch, t.rotation.yaw)

    def _reset(self):
        self._episode_t0 = datetime.now()

        if __CARLA_VERSION__ == '0.8.X':
            # create the carla config based on template and the params passed in
            config = ConfigParser()
            config.optionxform = str
            config.read(self._config_path)
            config.set('CARLA/LevelSettings', 'NumberOfVehicles',
                       self._driver_conf.cars)
            config.set('CARLA/LevelSettings', 'NumberOfPedestrians',
                       self._driver_conf.pedestrians)
            config.set('CARLA/LevelSettings', 'WeatherId',
                       self._driver_conf.weather)
            output = io.StringIO()
            config.write(output)
            scene_descriptions = self.carla.load_settings(output.getvalue())

            # based on the scene descriptions, find the start and end positions
            self.positions = scene_descriptions.player_start_spots
            # the episode_config saves [start_index, end_index]
            self.episode_config = find_valid_episode_position(
                self.positions, self.planner)

            self.carla.start_episode(self.episode_config[0])
            print('RESET ON POSITION ', self.episode_config[0],
                  ", the target location is: ", self.episode_config[1])

        else:
            # destroy old actors
            print('destroying actors')
            for actor in self._actor_list:
                actor.destroy()
            self._actor_list = []
            print('done.')

            # TODO: spawn pedestrians
            # TODO: spawn more vehicles

            if self._autopilot:
                self._current_weather = self._weather_list[
                    int(self._driver_conf.weather) - 1]
            else:
                self._current_weather = random.choice(self._weather_list)
            if not self._autopilot:
                # select one of the random starting points previously selected
                start_positions = np.loadtxt(self._driver_conf.positions_file,
                                             delimiter=',')
                if len(start_positions.shape) == 1:
                    start_positions = start_positions.reshape(
                        1, len(start_positions))

            # TODO: Assign random position from file
            WINDOW_WIDTH = 768
            WINDOW_HEIGHT = 576
            CAMERA_FOV = 103.0

            CAMERA_CENTER_T = carla.Location(x=0.7, y=-0.0, z=1.60)
            CAMERA_LEFT_T = carla.Location(x=0.7, y=-0.4, z=1.60)
            CAMERA_RIGHT_T = carla.Location(x=0.7, y=0.4, z=1.60)

            CAMERA_CENTER_ROTATION = carla.Rotation(roll=0.0,
                                                    pitch=0.0,
                                                    yaw=0.0)
            CAMERA_LEFT_ROTATION = carla.Rotation(roll=0.0,
                                                  pitch=0.0,
                                                  yaw=-45.0)
            CAMERA_RIGHT_ROTATION = carla.Rotation(roll=0.0,
                                                   pitch=0.0,
                                                   yaw=45.0)

            CAMERA_CENTER_TRANSFORM = carla.Transform(
                location=CAMERA_CENTER_T, rotation=CAMERA_CENTER_ROTATION)
            CAMERA_LEFT_TRANSFORM = carla.Transform(
                location=CAMERA_LEFT_T, rotation=CAMERA_LEFT_ROTATION)
            CAMERA_RIGHT_TRANSFORM = carla.Transform(
                location=CAMERA_RIGHT_T, rotation=CAMERA_RIGHT_ROTATION)

            self._world = self.carla.get_world()
            settings = self._world.get_settings()
            settings.synchronous_mode = True
            self._world.apply_settings(settings)

            # add traffic
            blueprints_vehi = self._world.get_blueprint_library().filter(
                'vehicle.*')
            blueprints_vehi = [
                x for x in blueprints_vehi
                if int(x.get_attribute('number_of_wheels')) == 4
            ]
            blueprints_vehi = [
                x for x in blueprints_vehi if not x.id.endswith('isetta')
            ]

            # @todo Needs to be converted to list to be shuffled.
            spawn_points = list(self._world.get_map().get_spawn_points())
            if len(spawn_points) == 0:
                if self.city_name_demo == "Exp_Town":
                    spawn_points = [
                        carla.Transform(
                            location=carla.Location(x=-11.5, y=-8.0, z=2.0))
                    ]

            random.shuffle(spawn_points)

            print('found %d spawn points.' % len(spawn_points))

            # TODO: debug change 50 to 0
            count = 0
            if count > 0:
                for spawn_point in spawn_points:
                    if self.try_spawn_random_vehicle_at(
                            blueprints_vehi, spawn_point):
                        count -= 1
                    if count <= 0:
                        break
            while count > 0:
                time.sleep(0.5)
                if self.try_spawn_random_vehicle_at(
                        blueprints_vehi, random.choice(spawn_points)):
                    count -= 1
            # end traffic addition!

            # begin parking addition
            if hasattr(
                    self._driver_conf, "parking_position_file"
            ) and self._driver_conf.parking_position_file is not None:
                parking_points = self.get_parking_locations(
                    self._driver_conf.parking_position_file)
                random.shuffle(parking_points)
                print('found %d parking points.' % len(parking_points))
                count = 200

                for spawn_point in parking_points:
                    self.try_spawn_random_vehicle_at(blueprints_vehi,
                                                     spawn_point, False)
                    count -= 1
                    if count <= 0:
                        break
            # end of parking addition

            blueprints = self._world.get_blueprint_library().filter('vehicle')
            vechile_blueprint = [
                e for i, e in enumerate(blueprints)
                if e.id == 'vehicle.lincoln.mkz2017'
            ][0]

            if self._vehicle is None or self._autopilot:
                if self._autopilot and self._vehicle is not None:
                    self._vehicle.destroy()
                    self._vehicle = None

                while self._vehicle is None:
                    if self._autopilot:
                        # from designated points
                        if hasattr(self._driver_conf,
                                   "extra_explore_prob") and random.random(
                                   ) < self._driver_conf.extra_explore_prob:
                            extra_positions = self.get_parking_locations(
                                self._driver_conf.extra_explore_position_file,
                                z_default=3.0,
                                random_perturb=True)
                            print(
                                "spawning hero vehicle from the extra exploration"
                            )
                            START_POSITION = random.choice(extra_positions)
                        else:
                            START_POSITION = random.choice(spawn_points)
                    else:
                        random_position = start_positions[
                            np.random.randint(start_positions.shape[0]), :]
                        START_POSITION = carla.Transform(
                            carla.Location(x=random_position[0],
                                           y=random_position[1],
                                           z=random_position[2] + 1.0),
                            carla.Rotation(pitch=random_position[3],
                                           roll=random_position[4],
                                           yaw=random_position[5]))

                    self._vehicle = self._world.try_spawn_actor(
                        vechile_blueprint, START_POSITION)
            else:
                if self._autopilot:
                    # from designated points
                    START_POSITION = random.choice(spawn_points)
                else:
                    random_position = start_positions[
                        np.random.randint(start_positions.shape[0]), :]
                    START_POSITION = carla.Transform(
                        carla.Location(x=random_position[0],
                                       y=random_position[1],
                                       z=random_position[2] + 1.0),
                        carla.Rotation(pitch=random_position[3],
                                       roll=random_position[4],
                                       yaw=random_position[5]))

                self._vehicle.set_transform(START_POSITION)

            print("after spawning the ego vehicle")

            print("warm up process to make the vehicle ego location correct")
            wd = self._world
            for i in range(25):
                wd.tick()
                if not wd.wait_for_tick(10.0):
                    continue
            print("warmup finished")

            if self._autopilot:
                # Nope: self._vehicle.set_autopilot()
                print("before roaming agent")
                self._agent_autopilot = RoamingAgent(self._vehicle)
            print("after roaming agent")
            if self.collision_sensor is not None:
                print("before destroying the sensor")
                self.collision_sensor.sensor.destroy()
                print("after destroying the sensor")
            else:
                print("collision sensor is None")
            self.collision_sensor = CollisionSensor(self._vehicle, self)

            print("after spawning the collision sensor")

            # set weather
            weather = getattr(carla.WeatherParameters, self._current_weather)
            self._vehicle.get_world().set_weather(weather)

            self._spectator = self._world.get_spectator()
            cam_blueprint = self._world.get_blueprint_library().find(
                'sensor.camera.rgb')

            cam_blueprint.set_attribute('image_size_x', str(WINDOW_WIDTH))
            cam_blueprint.set_attribute('image_size_y', str(WINDOW_HEIGHT))
            cam_blueprint.set_attribute('fov', str(CAMERA_FOV))

            if self._camera_center is not None:
                self._camera_center.destroy()
                self._camera_left.destroy()
                self._camera_right.destroy()
                self._camera_center = None

            if self._camera_center is None:
                self._camera_center = self._world.spawn_actor(
                    cam_blueprint,
                    CAMERA_CENTER_TRANSFORM,
                    attach_to=self._vehicle)
                self._camera_left = self._world.spawn_actor(
                    cam_blueprint,
                    CAMERA_LEFT_TRANSFORM,
                    attach_to=self._vehicle)
                self._camera_right = self._world.spawn_actor(
                    cam_blueprint,
                    CAMERA_RIGHT_TRANSFORM,
                    attach_to=self._vehicle)

                self._camera_center.listen(CallBack('CameraMiddle', self))
                self._camera_left.listen(CallBack('CameraLeft', self))
                self._camera_right.listen(CallBack('CameraRight', self))

            # spectator server camera
            self._spectator = self._world.get_spectator()

        self._skiped_frames = 0
        self._stucked_counter = 0
        self._start_time = time.time()

    def get_recording(self):
        if self._autopilot:
            # debug: 0 for debugging
            if self._skiped_frames >= 20:
                return True
            else:
                self._skiped_frames += 1
                return False

        else:
            '''
            if (self.joystick.get_button(8)):
                self._recording = True
            if (self.joystick.get_button(9)):
                self._recording = False
            '''
            if (self.joystick.get_button(6)):
                self._recording = True
                print("start recording!")
            if (self.joystick.get_button(7)):
                self._recording = False
                print("end recording!")
            return self._recording

    def initialize_map(self, city_name):
        self.city_name_demo = city_name
        if city_name == "RFS_MAP":
            path = get_current_folder() + "/maps_demo_area/rfs_demo_area.png"
            im = cv2.imread(path)
            im = im[:, :, :3]
            im = im[:, :, ::-1]
            self.demo_area_map = im
        else:
            print("do nothing since not a city with demo area")
            #raise ValueError("wrong city name: " + city_name)

    def loc_to_pix_rfs_sim(self, loc):
        u = 3.6090651558073654 * loc[1] + 2500.541076487252
        v = -3.6103367739019054 * loc[0] + 2501.862578166202
        return [int(v), int(u)]
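    # Editor's note: the affine constants above convert simulator (x, y) locations
    # into (row, col) pixel indices of the RFS demo-area mask; for example,
    # loc = (0.0, 0.0) maps to approximately [2501, 2500].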

    def in_valid_area(self, x, y):
        if self.city_name_demo == "RFS_MAP":
            pos = self.loc_to_pix_rfs_sim([x, y])
            locality = 50  # half-width in pixels (100x100-pixel window)
            local_area = self.demo_area_map[pos[0] - locality:pos[0] +
                                            locality, pos[1] -
                                            locality:pos[1] + locality, 0] > 0
            valid = np.sum(local_area) > 0
            if not valid:
                print(
                    "detected that the vehicle is not in the valid demo area")
            return valid
        else:
            return True

    def get_reset(self):
        if self._autopilot:
            if __CARLA_VERSION__ == '0.8.X':
                # increase the stuck detector if conditions satisfy
                if self._latest_measurements.player_measurements.forward_speed < 0.1:
                    self._stucked_counter += 1
                else:
                    self._stucked_counter = 0

                # if within auto pilot, reset if long enough or has collisions
                if time.time() - self._start_time > self._reset_period \
                  or self._latest_measurements.player_measurements.collision_vehicles    > 0.0 \
                  or self._latest_measurements.player_measurements.collision_pedestrians > 0.0 \
                  or self._latest_measurements.player_measurements.collision_other       > 0.0 \
                  or (self._latest_measurements.player_measurements.intersection_otherlane > 0.0 and self._latest_measurements.player_measurements.autopilot_control.steer < -0.99) \
                  or self._stucked_counter > 250:
                    if self._stucked_counter > 250:
                        reset_because_stuck = True
                    else:
                        reset_because_stuck = False

                    # TODO: commenting out this for debugging issue
                    self._reset()

                    if reset_because_stuck:
                        print("resetting because getting stucked.....")
                        return True
            else:
                # TODO: implement the collision detection algorithm, based on the new API
                if self.last_estimated_speed < 0.1:
                    self._stucked_counter += 1
                else:
                    self._stucked_counter = 0

                if time.time() - self._start_time > self._reset_period \
                or self._last_collided \
                or self._stucked_counter > 250 \
                or not self.in_valid_area(self._latest_measurements.player_measurements.transform.location.x,
                                        self._latest_measurements.player_measurements.transform.location.y):
                    #or np.abs(self._vehicle.get_vehicle_control().steer) > 0.95:
                    #or np.abs(self._vehicle.get_vehicle_control().brake) > 1:
                    # TODO intersection other lane is not available, so omit from the condition right now
                    if self._stucked_counter > 250:
                        reset_because_stuck = True
                    else:
                        reset_because_stuck = False

                    if self._last_collided:
                        print("reset becuase collision")

                    self._reset()

                    if reset_because_stuck:
                        print("resetting because getting stucked.....")
                        return True
        else:
            pass

        return False

    def get_waypoints(self):
        # TODO: waiting for German Ros to expose the waypoints
        wp1 = [1.0, 1.0]
        wp2 = [2.0, 2.0]
        return [wp1, wp2]

    def action_joystick(self):
        # joystick
        steering_axis = self.joystick.get_axis(0)
        acc_axis = self.joystick.get_axis(2)
        brake_axis = self.joystick.get_axis(5)
        # print("axis 0 %f, axis 2 %f, axis 3 %f" % (steering_axis, acc_axis, brake_axis))

        if (self.joystick.get_button(3)):
            self._rear = True
        if (self.joystick.get_button(2)):
            self._rear = False

        control = VehicleControl()
        control.steer = steering_axis
        control.throttle = (acc_axis + 1) / 2.0
        control.brake = (brake_axis + 1) / 2.0
        if control.brake < 0.001:
            control.brake = 0.0
        control.hand_brake = 0
        control.reverse = self._rear

        control.steer -= 0.0822  # presumably a fixed joystick steering-axis calibration offset

        #print("steer %f, throttle %f, brake %f" % (control.steer, control.throttle, control.brake))
        pygame.event.pump()

        return control

    def action_steering_wheel(self, jsInputs, jsButtons):
        control = VehicleControl()

        # Custom mapping from the pedal input range [1, -1] to outputs [0, 1],
        # i.e. an input of 1 means the pedal is not pressed.
        # For the steering, it seems fine as it is.
        K1 = 1.0  # 0.55
        steerCmd = K1 * math.tan(1.1 * jsInputs[self._steer_idx])

        K2 = 1.6  # 1.6
        throttleCmd = K2 + (
            2.05 * math.log10(-0.7 * jsInputs[self._throttle_idx] + 1.4) -
            1.2) / 0.92
        if throttleCmd <= 0:
            throttleCmd = 0
        elif throttleCmd > 1:
            throttleCmd = 1

        brakeCmd = 1.6 + (2.05 * math.log10(-0.7 * jsInputs[self._brake_idx] +
                                            1.4) - 1.2) / 0.92
        if brakeCmd <= 0:
            brakeCmd = 0
        elif brakeCmd > 1:
            brakeCmd = 1

        #print("Steer Cmd, ", steerCmd, "Brake Cmd", brakeCmd, "ThrottleCmd", throttleCmd)
        control.steer = steerCmd
        control.brake = brakeCmd
        control.throttle = throttleCmd
        toggle = jsButtons[self._reverse_idx]

        if toggle == 1:
            self._is_on_reverse += 1
        if self._is_on_reverse % 2 == 0:
            control.reverse = False
        if self._is_on_reverse > 1:
            self._is_on_reverse = True

        if self._is_on_reverse:
            control.reverse = True

        control.hand_brake = False  # jsButtons[self.handbrake_idx]

        return control
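    # Worked example (editor's note): with the log mapping above, a released pedal
    # (input = 1) gives 1.6 + (2.05 * log10(0.7) - 1.2) / 0.92, about -0.05, which
    # is clamped to 0, while a fully pressed pedal (input = -1) gives about 1.01,
    # clamped to 1, so the raw [1, -1] axis is mapped onto a [0, 1] command.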

    def compute_action(self, sensor, speed):
        if not self._autopilot:
            # get pygame input
            for event in pygame.event.get():
                # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN
                # JOYBUTTONUP JOYHATMOTION
                if event.type == pygame.JOYBUTTONDOWN:
                    if event.__dict__['button'] == 0:
                        self._current_command = 2.0
                    if event.__dict__['button'] == 1:
                        self._current_command = 3.0
                    if event.__dict__['button'] == 2:
                        self._current_command = 4.0
                    if event.__dict__['button'] == 3:
                        self._current_command = 5.0
                    if event.__dict__['button'] == 23:
                        self._current_command = 0.0
                    if event.__dict__['button'] == 4:
                        self._reset()
                        return VehicleControl()
                if event.type == pygame.JOYBUTTONUP:
                    self._current_command = 2.0

            #pygame.event.pump()
            numAxes = self.joystick.get_numaxes()
            jsInputs = [
                float(self.joystick.get_axis(i)) for i in range(numAxes)
            ]
            # print (jsInputs)
            jsButtons = [
                float(self.joystick.get_button(i))
                for i in range(self.joystick.get_numbuttons())
            ]

            if self._steering_wheel_flag:
                control = self.action_steering_wheel(jsInputs, jsButtons)
            else:
                control = self.action_joystick()

        else:
            if __CARLA_VERSION__ == '0.8.X':
                # This relies on get_sensor_data having been called first; otherwise self._latest_measurements is not filled.
                control = self._latest_measurements.player_measurements.autopilot_control
                print('[Throttle = {}] [Steering = {}] [Brake = {}]'.format(
                    control.throttle, control.steer, control.brake))
            else:
                self._world.tick()
                if self._world.wait_for_tick(10.0):
                    control, self._current_command = self._agent_autopilot.run_step(
                    )

        print('[Throttle = {}] [Steering = {}] [Brake = {}]'.format(
            control.throttle, control.steer, control.brake))
        return control

    def estimate_speed(self):
        vel = self._vehicle.get_velocity()
        speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)

        # speed in m/s
        return speed

    def get_sensor_data(self, goal_pos=None, goal_ori=None):
        if __CARLA_VERSION__ == '0.8.X':
            # return the latest measurement and the next direction
            measurements, sensor_data = self.carla.read_data()
            self._latest_measurements = measurements

            if self.use_planner:
                player_data = measurements.player_measurements
                pos = [
                    player_data.transform.location.x,
                    player_data.transform.location.y, 0.22
                ]
                ori = [
                    player_data.transform.orientation.x,
                    player_data.transform.orientation.y,
                    player_data.transform.orientation.z
                ]

                if sldist([
                        player_data.transform.location.x,
                        player_data.transform.location.y
                ], [
                        self.positions[self.episode_config[1]].location.x,
                        self.positions[self.episode_config[1]].location.y
                ]) < self._goal_reaching_threshold:
                    self._reset()

                direction = self.planner.get_next_command(
                    pos, ori, [
                        self.positions[self.episode_config[1]].location.x,
                        self.positions[self.episode_config[1]].location.y, 0.22
                    ], (1, 0, 0))
            else:
                direction = 2.0
        else:
            self.last_timestamp.elapsed_seconds += 0.2
            #self.last_timestamp = self.carla.get_world().wait_for_tick(30.0)
            #print(timestamp.delta_seconds, "delta seconds")

            #while self.update_once == False:
            #    time.sleep(0.01)

            self.last_estimated_speed = self.estimate_speed()

            data_buffer_lock.acquire()
            sensor_data = copy.deepcopy(self._data_buffers)
            data_buffer_lock.release()

            #self.update_once = False

            collision_lock.acquire()
            collision_events = self._collision_events
            self._last_collided = len(collision_events) > 0
            self._collision_events = []
            collision_lock.release()

            if len(collision_events) > 0:
                print(collision_events)
            # TODO: make sure those events are actually valid
            if 'Static' in collision_events:
                collision_other = 1.0
            else:
                collision_other = 0.0
            if "Vehicles" in collision_events:
                collision_vehicles = 1.0
            else:
                collision_vehicles = 0.0
            if "Pedestrians" in collision_events:
                collision_pedestrians = 1.0
            else:
                collision_pedestrians = 0.0

            #current_ms_offset = int(math.ceil((datetime.now() - self._episode_t0).total_seconds() * 1000))
            # TODO: get a gametime stamp, instead of os timestamp
            #current_ms_offset = int(self.carla.get_timestamp().elapsed_seconds * 1000)
            #print(current_ms_offset, "ms offset")
            current_ms_offset = self.last_timestamp.elapsed_seconds * 1000

            second_level = namedtuple('second_level', [
                'forward_speed', 'transform', 'collision_other',
                'collision_pedestrians', 'collision_vehicles'
            ])
            transform = namedtuple('transform', ['location', 'orientation'])
            loc = namedtuple('loc', ['x', 'y'])
            ori = namedtuple('ori', ['x', 'y', 'z'])
            Meas = namedtuple('Meas',
                              ['player_measurements', 'game_timestamp'])

            v_transform = self._vehicle.get_transform()
            measurements = Meas(
                second_level(
                    self.last_estimated_speed,
                    transform(
                        loc(v_transform.location.x, v_transform.location.y),
                        ori(v_transform.rotation.pitch,
                            v_transform.rotation.roll,
                            v_transform.rotation.yaw)), collision_other,
                    collision_pedestrians, collision_vehicles),
                current_ms_offset)
            direction = self._current_command

            self._latest_measurements = measurements

            #print('[Speed = {} Km/h] [Direction = {}]'.format(measurements.player_measurements.forward_speed, direction))
        #print(">>>>> planner output direction: ", direction)

        return measurements, sensor_data, direction

    def act(self, control):
        if __CARLA_VERSION__ == '0.8.X':
            self.carla.send_control(control)
        else:
            self._vehicle.apply_control(control)
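
A minimal control-loop sketch for the interface above. The instance name `driver` and the fixed placeholder control are hypothetical; only methods and types that appear in the listing (get_sensor_data, act, VehicleControl) are used, and in practice the control would come from an agent, the joystick, or the autopilot path shown earlier.

# hypothetical wiring: `driver` is an already constructed instance of the class above
while True:
    # get_sensor_data() also fills self._latest_measurements, which the
    # 0.8.X autopilot branch relies on
    measurements, sensor_data, direction = driver.get_sensor_data()

    control = VehicleControl()   # placeholder; normally produced by an agent or controller
    control.throttle = 0.5
    driver.act(control)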
Esempio n. 14
0
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        if config["discrete_actions"]:
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ), dtype=np.float32)
        if config["use_depth_camera"]:
            image_space = Box(
                -1.0,
                1.0,
                shape=(config["y_res"], config["x_res"],
                       1 * config["framestack"]),
                dtype=np.float32)
        else:
            image_space = Box(
                0,
                255,
                shape=(config["y_res"], config["x_res"],
                       3 * config["framestack"]),
                dtype=np.uint8)
        self.observation_space = Tuple(  # forward_speed, dist to goal
            [
                image_space,
                Discrete(len(COMMANDS_ENUM)),  # next_command
                Box(-128.0, 128.0, shape=(2, ), dtype=np.float32)
            ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY, self.config["server_map"], "-windowed",
                "-ResX=400", "-ResY=300", "-carla-server",
                "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            camera1.set_position(30, 0, 130)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set_position(30, 0, 130)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
            py_measurements["forward_speed"],
            py_measurements["distance_to_goal"]
        ])
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def _step(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(
            steer=steer,
            throttle=throttle,
            brake=brake,
            hand_brake=hand_brake,
            reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }
        reward = compute_reward(self, self.prev_measurement, py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        done = (self.num_steps > self.scenario["max_steps"]
                or py_measurements["next_command"] == "REACH_GOAL"
                or (self.config["early_terminate_on_collision"]
                    and collided_done(py_measurements)))
        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(
            x_res=self.config["render_x_res"],
            y_res=self.config["render_y_res"],
            vid=os.path.join(videos_dir, self.episode_id),
            img=os.path.join(CARLA_OUT_PATH, "CameraRGB", self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        else:
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data
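
    # Value ranges produced by preprocess_image (descriptive note):
    #   depth path: image.data lies in [0, 1], so (data - 0.5) * 2 lands in [-1, 1]
    #   RGB path:   uint8 pixels in [0, 255] map through (x - 128) / 128
    #               to float32 values in [-1.0, 0.9921875]
    # Both paths also downscale from (render_y_res, render_x_res) to (y_res, x_res).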

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["use_depth_camera"]:
            camera_name = "CameraDepth"
        else:
            camera_name = "CameraRGB"
        for name, image in sensor_data.items():
            if name == camera_name:
                observation = image

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance([
                cur.transform.location.x, cur.transform.location.y, GROUND_Z
            ], [
                cur.transform.orientation.x, cur.transform.orientation.y,
                GROUND_Z
            ], [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                self.end_pos.orientation.x, self.end_pos.orientation.y,
                GROUND_Z
            ]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file, image.data)

        assert observation is not None, sensor_data
        return observation, py_measurements
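
A minimal usage sketch for the environment above, assuming the module-level constants it references (ENV_CONFIG, SERVER_BINARY, DISCRETE_ACTIONS, etc.) are defined in the scraped module and a CARLA 0.8.x server binary is available; the random-action loop is purely illustrative.

env = CarlaEnv()
obs = env.reset()                                # spawns the server on first use
done = False
while not done:
    action = env.action_space.sample()           # random action, just to exercise the API
    obs, reward, done, info = env.step(action)   # info is the py_measurements dict
env.clear_server_state()                         # kill the spawned CARLA server process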
Esempio n. 15
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited by a class that defines
    all the experiments to be run by the agent.
    """

    def __init__(
            self,
            city_name='Town01',
            name_to_save='Test',
            continue_experiment=False,
            save_images=False,
            distance_for_success=2.0
    ):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point required to
        # consider the episode a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images
                                    )

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        self.carla_map = CarlaMap(city_name, 0.1653, 50)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark;
        if it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run_step method implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # This function returns the current pose and experiment for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode start and end positions
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level directions (planner commands) needed to reach a certain goal.
        """

        directions = self._planner.get_next_command(
            (current_point.location.x,
             current_point.location.y, 0.22),
            (current_point.orientation.x,
             current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest-path distance between two points over the road network.
        """

        return self._planner.get_shortest_path_distance(
            [
                start_point.location.x, start_point.location.y, 0.22], [
                start_point.orientation.x, start_point.orientation.y, 0.22], [
                end_point.location.x, end_point.location.y, end_point.location.z], [
                end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])

    def load_empty_trajectory(self):
        original_path = "./drive_interfaces/carla/carla_client/carla/planner/" + self._city_name + ".png"
        self.trajectory_img = cv2.imread(original_path)

    def update_trajectory(self, curr_x, curr_y, tar_x, tar_y, is_first, directions):
        cur = self.carla_map.convert_to_pixel([curr_x, curr_y, .22])
        cur = [int(cur[1]), int(cur[0])]
        tar = self.carla_map.convert_to_pixel([tar_x, tar_y, .22])
        tar = [int(tar[1]), int(tar[0])]
        print("current location", cur, "final location", tar)
        if is_first:
            trj_sz = 10
        else:
            trj_sz = 3
        directions_to_color = {0.0: [255, 0, 0], 2.0: [255, 0, 0], 5.0: [255, 0, 0],
                               3.0: [0, 255, 0], 4.0: [0, 0, 255]}
        color = directions_to_color[directions]
        self.trajectory_img[cur[0] - trj_sz:cur[0] + trj_sz, cur[1] - trj_sz:cur[1] + trj_sz, 0] = color[0]
        self.trajectory_img[cur[0] - trj_sz:cur[0] + trj_sz, cur[1] - trj_sz:cur[1] + trj_sz, 1] = color[1]
        self.trajectory_img[cur[0] - trj_sz:cur[0] + trj_sz, cur[1] - trj_sz:cur[1] + trj_sz, 2] = color[2]
        tar_sz = 10
        self.trajectory_img[tar[0] - tar_sz:tar[0] + tar_sz, tar[1] - tar_sz:tar[1] + tar_sz, 0] = 0
        self.trajectory_img[tar[0] - tar_sz:tar[0] + tar_sz, tar[1] - tar_sz:tar[1] + tar_sz, 1] = 0
        self.trajectory_img[tar[0] - tar_sz:tar[0] + tar_sz, tar[1] - tar_sz:tar[1] + tar_sz, 2] = 0

    def save_trajectory_image(self, episode_name):
        out_name = os.path.join(self._recording._path, '_images/episode_{:s}_trajectory.png'.format(episode_name))
        folder = os.path.dirname(out_name)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        cv2.imwrite(out_name, self.trajectory_img)


    def measurements_to_pos_yaw(self, measurements):
        pos = [measurements.player_measurements.transform.location.x,
               measurements.player_measurements.transform.location.y ]

        ori = [measurements.player_measurements.transform.orientation.x,
               measurements.player_measurements.transform.orientation.y,
               measurements.player_measurements.transform.orientation.z ]

        city_name = {"Town01": "01",
                     "Town02": "02",
                     "RFS_MAP": "10",
                     "Exp_Town": "11",
                     "Exp_Town02": "11",
                     "Exp_Town01_02Shoulder": "13"}[self._city_name]
        return {"town_id": city_name, "pos": pos, "ori": ori}

    def _run_navigation_episode(
            self,
            agent,
            client,
            time_out,
            target,
            episode_name):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        self.load_empty_trajectory()
        agent.debug_i = 0
        agent.temp_image_path = self._recording._path

        stuck_counter = 0
        pre_x = 0.0
        pre_y = 0.0
        last_collision_ago = 1000000000
        is_first = True

        while (current_timestamp - initial_timestamp) < (time_out * 1000) and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(measurements.player_measurements.transform, target)
            # Agent process the data.
            control = agent.run_step(measurements, sensor_data, directions, target, self.measurements_to_pos_yaw(measurements))
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            #sensor_data = self._recording.yang_annotate_images(sensor_data, directions)
            #self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ',
                         control.steer, control.throttle, control.brake)

            # game_timestamp is in milliseconds
            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])

            self.update_trajectory(current_x, current_y, target.location.x, target.location.y, is_first, directions)
            is_first = False

            # Write the status of the run in verbose mode
            logging.info('Status:')
            logging.info(
                '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                float(distance), current_x, current_y, target.location.x,
                target.location.y)
            # Check whether the target has been reached
            if distance < self._distance_for_success:
                success = True

            # Increment the frame counter and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

            pm = measurements.player_measurements
            if pm.collision_vehicles    > 0.0 \
              or pm.collision_pedestrians > 0.0 \
              or pm.collision_other       > 0.0:
                last_collision_ago = 0
            else:
                last_collision_ago += 1

            if sldist([current_x, current_y], [pre_x, pre_y]) < 0.1:
                stuck_counter += 1
            else:
                stuck_counter = 0
            pre_x = current_x
            pre_y = current_y
            if stuck_counter > 400:
                print("breaking because the vehicle has been stuck for too long")
                break

            if stuck_counter > 30 and last_collision_ago < 50:
                # breaking the episode on collision + stuck is disabled
                # print("breaking because of collision and stuck")
                # break
                pass

            if math.fabs(directions) < 0.1:
                # The goal state is reached.
                success = True

        # convert the images saved by the underlying agent to a video
        if self._recording._save_images:
            # convert the saved images to a video and store it in the right place
            # we have to save the images within the carla_machine, since only it knows what the cutting plane is
            # assume that the images lie around at ./temp/%05d.png
            print("converting images to a video...")
            out_name = os.path.join(self._recording._path, '_images/episode_{:s}.mp4'.format(episode_name))

            folder = os.path.dirname(out_name)
            if not os.path.isdir(folder):
                os.makedirs(folder)

            cmd = ["ffmpeg", "-y", "-i",  agent.temp_image_path+"/%09d.png", "-c:v", "libx264", out_name]
            call(" ".join(cmd), shell=True)

            cmd = ["find", agent.temp_image_path, "-name", "00*png", "-print | xargs rm"]

            call(" ".join(cmd), shell=True)

            self.save_trajectory_image(episode_name)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance
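
The benchmark above calls an sldist helper that is not part of this listing. A minimal sketch of the assumed definition (planar Euclidean distance between two [x, y] points), consistent with how it is called here; the actual helper in the scraped module may be written differently.

import math

def sldist(c1, c2):
    # straight-line distance in the x-y plane
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)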
Esempio n. 16
0
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        if config["discrete_actions"]:
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ), dtype=np.float32)
        if config["use_depth_camera"]:
            image_space = Box(-1.0,
                              1.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        else:
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     3 * config["framestack"]),
                              dtype=np.uint8)
        self.observation_space = Tuple(  # forward_speed, dist to goal
            [
                image_space,
                Discrete(len(COMMANDS_ENUM)),  # next_command
                Box(-128.0, 128.0, shape=(2, ), dtype=np.float32)
            ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen([
            SERVER_BINARY, self.config["server_map"], "-windowed", "-ResX=800",
            "-ResY=600", "-carla-server", "-benchmark -fps=10",
            "-carla-world-port={}".format(self.server_port)
        ],
                                               preexec_fn=os.setsid,
                                               stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=True,
                     NumberOfVehicles=self.scenario["num_vehicles"],
                     NumberOfPedestrians=self.scenario["num_pedestrians"],
                     WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            camera1.set_position(0.1, 0, 1.7)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set_position(0.1, 0, 1.7)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
            py_measurements["forward_speed"],
            py_measurements["distance_to_goal"]
        ])
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def _step(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(steer=steer,
                                 throttle=throttle,
                                 brake=brake,
                                 hand_brake=hand_brake,
                                 reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }
        reward = compute_reward(self, self.prev_measurement, py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        done = (self.num_steps > self.scenario["max_steps"]
                or py_measurements["next_command"] == "REACH_GOAL"
                or (self.config["early_terminate_on_collision"]
                    and collided_done(py_measurements)))
        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(x_res=self.config["render_x_res"],
                 y_res=self.config["render_y_res"],
                 vid=os.path.join(videos_dir, self.episode_id),
                 img=os.path.join(CARLA_OUT_PATH, "CameraRGB",
                                  self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        else:
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["use_depth_camera"]:
            camera_name = "CameraDepth"
        else:
            camera_name = "CameraRGB"
        for name, image in sensor_data.items():
            if name == camera_name:
                observation = image

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance([
                cur.transform.location.x, cur.transform.location.y, GROUND_Z
            ], [
                cur.transform.orientation.x, cur.transform.orientation.y,
                GROUND_Z
            ], [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                self.end_pos.orientation.x, self.end_pos.orientation.y,
                GROUND_Z
            ]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command":
            next_command,  # TODO can we figure some way using this command
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file, image.data)

        assert observation is not None, sensor_data
        return observation, py_measurements
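
The environment indexes COMMANDS_ENUM and COMMAND_ORDINAL, which are defined outside this excerpt. One plausible sketch of those tables, consistent with the planner command codes used elsewhere in these examples (0.0 reach goal, 2.0 lane follow, 3.0 left, 4.0 right, 5.0 straight); the actual values live in the scraped module and may differ.

COMMANDS_ENUM = {
    0.0: "REACH_GOAL",
    5.0: "GO_STRAIGHT",
    4.0: "TURN_RIGHT",
    3.0: "TURN_LEFT",
    2.0: "LANE_FOLLOW",
}
# ordinal used by encode_obs to feed the Discrete(len(COMMANDS_ENUM)) observation
COMMAND_ORDINAL = {name: i for i, name in enumerate(COMMANDS_ENUM.values())}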
Esempio n. 17
0
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        # The Action Space
        if config["discrete_actions"]:
            self.action_space = Discrete(
                len(DISCRETE_ACTIONS
                    ))  # It will be transformed to continuous 2D action.
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ),
                                    dtype=np.float32)  # 2D action.

        if config["use_depth_camera"]:
            image_space = Box(-1.0,
                              1.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        else:
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     3 * config["framestack"]),
                              dtype=np.uint8)
        # encode_measure ---> rgb + depth + pre_rgb + measurement_encode
        # 3 + 1 + 3 + 1 = 8
        if config["encode_measurement"]:
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"], 8),
                              dtype=np.float32)

        # The Observation Space
        self.observation_space = Tuple([
            image_space,
            Discrete(len(COMMANDS_ENUM)),  # next_command
            Box(-128.0, 128.0, shape=(2, ),
                dtype=np.float32)  # forward_speed, dist to goal
        ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(1000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY,
                self.config["server_map"],
                "-windowed",
                "-ResX=800",
                "-ResY=600",
                "-carla-server",
                "-benchmark -fps=10",  #: to run the simulation at a fixed time-step of 0.1 seconds
                "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(
        self
    ):  # the __del__ method will be called when the instance of the class is deleted.(memory is freed.)
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 170)
            camera1.set_position(0.5, 0.0, 1.6)
            # camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        if self.config["encode_measurement"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 170)
            camera1.set_position(0.5, 0.0, 1.6)
            camera1.set(FOV=120)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set(FOV=120)
        camera2.set_position(0.5, 0.0, 1.6)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x, self.start_pos.location.y
        ]
        self.end_coord = [self.end_pos.location.x, self.end_pos.location.y]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], [int(x) for x in self.start_coord],
            self.scenario["end_pos_id"], [int(x) for x in self.end_coord]))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    # Observation layout: rgb, depth, forward_speed, next_command.
    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        if self.config["encode_measurement"]:
            feature_map = np.zeros([4, 4])
            feature_map[1, :] = (py_measurements["forward_speed"] - 15) / 15
            feature_map[2, :] = (
                COMMAND_ORDINAL[py_measurements["next_command"]] - 2) / 2
            feature_map[0, 0] = py_measurements["x_orient"]
            feature_map[0, 1] = py_measurements["y_orient"]
            feature_map[0,
                        2] = (py_measurements["distance_to_goal"] - 170) / 170
            feature_map[0,
                        1] = (py_measurements["distance_to_goal_euclidean"] -
                              170) / 170
            feature_map[3, 0] = (py_measurements["x"] - 50) / 150
            feature_map[3, 1] = (py_measurements["y"] - 50) / 150
            feature_map[3, 2] = (py_measurements["end_coord"][0] - 150) / 150
            feature_map[3, 3] = (py_measurements["end_coord"][1] - 150) / 150
            feature_map = np.tile(feature_map, (24, 24))
            image = np.concatenate(
                [prev_image[:, :, 0:3], image, feature_map[:, :, np.newaxis]],
                axis=2)
            # print("image shape after encoding", image.shape)

        obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
            py_measurements["forward_speed"],
            py_measurements["distance_to_goal"]
        ])
        self.last_obs = obs
        # print('distance to goal', py_measurements["distance_to_goal"])
        # print("speed", py_measurements["forward_speed"])
        return obs

    # Example of the py_measurements dict returned by _read_observation() and
    # passed back as the info of step(); a minimal rollout sketch follows this example.
    # {'episode_id': '2019-02-22_11-26-36_990083',
    #  'step': 0,
    #  'x': 71.53003692626953,
    #  'y': 302.57000732421875,
    #  'x_orient': -1.0,
    #  'y_orient': -6.4373016357421875e-06,
    #  'forward_speed': -3.7578740032934155e-13,
    #  'distance_to_goal': 0.6572,
    #  'distance_to_goal_euclidean': 0.6200001239776611,
    #  'collision_vehicles': 0.0,
    #  'collision_pedestrians': 0.0,
    #  'collision_other': 0.0,
    #  'intersection_offroad': 0.0,
    #  'intersection_otherlane': 0.0,
    #  'weather': 0,
    #  'map': '/Game/Maps/Town02',
    #  'start_coord': [0.0, 3.0],
    #  'end_coord': [0.0, 3.0],
    #  'current_scenario': {'city': 'Town02',
    #                       'num_vehicles': 0,
    #                       'num_pedestrians': 20,
    #                       'weather_distribution': [0],
    #                       'start_pos_id': 36,
    #                       'end_pos_id': 40,
    #                       'max_steps': 600},
    #  'x_res': 10,
    #  'y_res': 10,
    #  'num_vehicles': 0,
    #  'num_pedestrians': 20,
    #  'max_steps': 600,
    #  'next_command': 'GO_STRAIGHT',
    #  'action': (0, 1),
    #  'control': {'steer': 1.0,
    #              'throttle': 0.0,
    #              'brake': 0.0,
    #              'reverse': False,
    #              'hand_brake': False},
    #  'reward': 0.0,
    #  'total_reward': 0.0,
    #  'done': False}

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def _step(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]  # Carla action is 2D.
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))

        # reverse and hand_brake are disabled.
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(steer=steer,
                                 throttle=throttle,
                                 brake=brake,
                                 hand_brake=hand_brake,
                                 reverse=reverse)

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }

        # compute reward
        reward = compute_reward(self, self.prev_measurement, py_measurements)

        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward

        # done or not
        done = (self.num_steps > self.scenario["max_steps"]
                or py_measurements["next_command"] == "REACH_GOAL"
                or (self.config["early_terminate_on_collision"]
                    and collided_done(py_measurements)))

        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if self.config["verbose"] and CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(x_res=self.config["render_x_res"],
                 y_res=self.config["render_y_res"],
                 vid=os.path.join(videos_dir, self.episode_id),
                 img=os.path.join(CARLA_OUT_PATH, "CameraRGB",
                                  self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        elif self.config["encode_measurement"]:
            # data = (image - 0.5) * 2
            data = image.reshape(self.config["render_y_res"],
                                 self.config["render_x_res"], -1)
            #data[:, :, 4] = (data[:, :, 0] - 0.5) * 2
            #data[:, :, 0:3] = (data[:, :, 0:2].astype(np.float32) - 128) / 128
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)

        else:
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["encode_measurement"]:
            # print(sensor_data["CameraRGB"].data.shape, sensor_data["CameraDepth"].data.shape)
            observation = np.concatenate(
                ((sensor_data["CameraRGB"].data.astype(np.float32) - 128) /
                 128,
                 (sensor_data["CameraDepth"].data[:, :, np.newaxis] - 0.5) *
                 0.5),
                axis=2)

            # print("observation_shape", observation.shape)
        else:
            if self.config["use_depth_camera"]:
                camera_name = "CameraDepth"

            else:
                camera_name = "CameraRGB"
            for name, image in sensor_data.items():
                if name == camera_name:
                    observation = image

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])
        # With CARLA 0.8.0 the planner distance should be in meters.
        # TODO: run experiments to verify the distance scale before tuning the reward function.
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]))

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file, image.data)

        assert observation is not None, sensor_data
        return observation, py_measurements
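
# A minimal rollout sketch for the environment above. It assumes the class is
# named CarlaEnv and is configured via ENV_CONFIG (as in the examples that
# follow), that it exposes a gym-style action_space, and that a CARLA server
# binary is available on this machine; adapt the names to your setup.
if __name__ == "__main__":
    env = CarlaEnv(config=ENV_CONFIG)
    try:
        obs = env.reset()
        done = False
        while not done:
            # Replace the random action with your policy's output.
            action = env.action_space.sample()
            # step() returns (obs, reward, done, info); info is py_measurements.
            obs, reward, done, info = env.step(action)
            print("step", info.get("step"), "reward", round(reward, 3))
    finally:
        # Always tear down the CARLA server process.
        env.clear_server_state()
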
Example n. 18
0
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        """
        Carla Gym Environment class implementation. Creates an OpenAI Gym compatible driving environment based on
        Carla driving simulator.
        :param config: A dictionary with environment configuration keys and values
        """
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        if config["discrete_actions"]:
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            # float32 so the Box can represent the continuous [-1, 1] action range.
            self.action_space = Box(-1.0, 1.0, shape=(2,), dtype=np.float32)
        if config["use_depth_camera"]:
            image_space = Box(
                -1.0, 1.0, shape=(
                    config["y_res"], config["x_res"],
                    1 * config["framestack"]), dtype=np.float32)
        else:
            image_space = Box(
                0.0, 255.0, shape=(
                    config["y_res"], config["x_res"],
                    3 * config["framestack"]), dtype=np.float32)
        if self.config["use_image_only_observations"]:
            self.observation_space = image_space
        else:
            self.observation_space = Tuple(
                [image_space,
                 Discrete(len(COMMANDS_ENUM)),  # next_command
                 Box(-128.0, 128.0, shape=(2,), dtype=np.float32)])  # forward_speed, dist to goal

        self._spec = lambda: None
        self._spec.id = "Carla-v0"
        self._seed = ENV_CONFIG["seed"]

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None
        self.viewer = None

    def render(self, mode=None):
        filename = f'images/id_{self._spec.id}_ep_{self.episode_id}_step_{self.num_steps}'
        self.frame_image.save_to_disk(filename)
        # from gym.envs.classic_control import rendering
        # if self.viewer is None:
        #     self.viewer = rendering.SimpleImageViewer()
        # self.viewer.imshow(self.frame_image)
        # return self.viewer.isopen

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        if self.config["render"]:
            self.server_process = subprocess.Popen(
                [SERVER_BINARY, self.config["server_map"],
                 "-windowed", "-ResX=400", "-ResY=300",
                 "-carla-server",
                 "-carla-world-port={}".format(self.server_port)],
                preexec_fn=os.setsid, stdout=open(os.devnull, "w"))
        else:
            self.server_process = subprocess.Popen(
                ("SDL_VIDEODRIVER=offscreen SDL_HINT_CUDA_DEVICE={} {} " +
                 self.config["server_map"] + " -windowed -ResX=400 -ResY=300"
                 " -carla-server -carla-world-port={}").format(0, SERVER_BINARY, self.server_port),
                shell=True, preexec_fn=os.setsid, stdout=open(os.devnull, "w"))

        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self.reset_env()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def reset_env(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        # If config["scenarios"] is a single scenario, then use it if it's an array of scenarios, randomly choose one and init
        if isinstance(self.config["scenarios"],dict):
            self.scenario = self.config["scenarios"]
        else: #isinstance array of dict
            self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(
                self.config["render_x_res"], self.config["render_y_res"])
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(
            self.config["render_x_res"], self.config["render_y_res"])
        camera2.set_position(0.30, 0, 1.30)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100]
        print(
            "Start pos {} ({}), end {} ({})".format(
                self.scenario["start_pos_id"], self.start_coord,
                self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        if self.config["use_image_only_observations"]:
            obs = image
        else:
            obs = (
                image,
                COMMAND_ORDINAL[py_measurements["next_command"]],
                [py_measurements["forward_speed"],
                 py_measurements["distance_to_goal"]])
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self.step_env(action)
            return obs
        except Exception:
            print(
                "Error during step, terminating episode early",
                traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def step_env(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        throttle = float(np.clip(action[0], 0, 1))
        brake = float(np.abs(np.clip(action[0], -1, 0)))
        steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print(
                "steer", steer, "throttle", throttle, "brake", brake,
                "reverse", reverse)

        self.client.send_control(
            steer=steer, throttle=throttle, brake=brake, hand_brake=hand_brake,
            reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }
        reward = self.calculate_reward(py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        done = (self.num_steps > self.scenario["max_steps"] or
                py_measurements["next_command"] == "REACH_GOAL" or
                (self.config["early_terminate_on_collision"] and
                 check_collision(py_measurements)))
        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (
            self.encode_obs(image, py_measurements), reward, done,
            py_measurements)


    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(
                self.config["render_y_res"], self.config["render_x_res"], 1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        else:
            data = image.data.reshape(
                self.config["render_y_res"], self.config["render_x_res"], 3)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["use_depth_camera"]:
            camera_name = "CameraDepth"
        else:
            camera_name = "CameraRGB"
        for name, image in sensor_data.items():
            if name == camera_name:
                observation = image
        self.frame_image = observation
        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[
                self.planner.get_next_command(
                    [cur.transform.location.x, cur.transform.location.y,
                     GROUND_Z],
                    [cur.transform.orientation.x, cur.transform.orientation.y,
                     GROUND_Z],
                    [self.end_pos.location.x, self.end_pos.location.y,
                     GROUND_Z],
                    [self.end_pos.orientation.x, self.end_pos.orientation.y,
                     GROUND_Z])
            ]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [cur.transform.orientation.x, cur.transform.orientation.y,
                 GROUND_Z],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z],
                [self.end_pos.orientation.x, self.end_pos.orientation.y,
                 GROUND_Z]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(np.linalg.norm(
            [cur.transform.location.x - self.end_pos.location.x,
             cur.transform.location.y - self.end_pos.location.y]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }


        assert observation is not None, sensor_data
        return observation, py_measurements

    def calculate_reward(self, current_measurement):
        """
        Calculate the reward based on the effect of the action taken using the previous and the current measurements
        :param current_measurement: The measurement obtained from the Carla engine after executing the current action
        :return: The scalar reward
        """
        reward = 0.0

        cur_dist = current_measurement["distance_to_goal"]

        prev_dist = self.prev_measurement["distance_to_goal"]

        if self.config["verbose"]:
            print("Cur dist {}, prev dist {}".format(cur_dist, prev_dist))

        # Distance travelled toward the goal in m
        reward += np.clip(prev_dist - cur_dist, -10.0, 10.0)

        # Change in speed (km/hr)
        reward += 0.05 * (current_measurement["forward_speed"] - self.prev_measurement["forward_speed"])

        # New collision damage
        reward -= .00002 * (
            current_measurement["collision_vehicles"] + current_measurement["collision_pedestrians"] +
            current_measurement["collision_other"] - self.prev_measurement["collision_vehicles"] -
            self.prev_measurement["collision_pedestrians"] - self.prev_measurement["collision_other"])

        # New sidewalk intersection
        reward -= 2 * (
            current_measurement["intersection_offroad"] - self.prev_measurement["intersection_offroad"])

        # New opposite lane intersection
        reward -= 2 * (
            current_measurement["intersection_otherlane"] - self.prev_measurement["intersection_otherlane"])

        return reward
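
# A standalone sketch of the reward shaping implemented by calculate_reward
# above, plus a tiny worked example. The helper name is illustrative only and
# numpy is assumed to be imported as np, as elsewhere in these examples.
def _shaped_reward(prev, cur):
    reward = 0.0
    # Progress toward the goal, clipped to +/- 10 m per step.
    reward += np.clip(prev["distance_to_goal"] - cur["distance_to_goal"], -10.0, 10.0)
    # Change in speed (km/h), weighted lightly.
    reward += 0.05 * (cur["forward_speed"] - prev["forward_speed"])
    # New collision damage since the previous step.
    reward -= 0.00002 * sum(
        cur[k] - prev[k]
        for k in ("collision_vehicles", "collision_pedestrians", "collision_other"))
    # New sidewalk / opposite-lane intersection.
    reward -= 2 * (cur["intersection_offroad"] - prev["intersection_offroad"])
    reward -= 2 * (cur["intersection_otherlane"] - prev["intersection_otherlane"])
    return reward

# Worked example: 1.2 m closer to the goal, 2 km/h faster, no new infractions:
# reward = 1.2 + 0.05 * 2 = 1.3
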
Example n. 19
0
class CarlaEnv(gym.Env):

    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        self.action_space = Discrete(len(DISCRETE_ACTIONS))

        # RGB Camera
        self.frame_shape = (config["y_res"], config["x_res"])
        image_space = Box(
            0, 1, shape=(
                config["y_res"], config["x_res"],
                FRAME_DEPTH * config["framestack"]), dtype=np.float32)
        self.observation_space = image_space

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None
        self.last_full_obs = None
        self.framestack = [None] * config["framestack"]
        self.framestack_index = 0
        self.running_restarts = 0
        self.on_step = None
        self.on_next = None
        self.photo_index = 1
        self.episode_index = 0

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [self.config["server_binary"], self.config["server_map"],
             "-windowed",
             "-ResX={}".format(self.config["render_x_res"]),
             "-ResY={}".format(self.config["render_y_res"]),
             "-fps={}".format(self.config["fps"]),
             "-carla-server",
             "-carla-world-port={}".format(self.server_port)],
            preexec_fn=os.setsid, stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        if self.on_next is not None:
            self.on_next()

        error = None
        self.running_restarts += 1
        for _ in range(RETRIES_ON_ERROR):
            try:
                # Force a full reset of Carla after some number of restarts
                if self.running_restarts > self.config["server_restart_interval"]:
                    print("Shutting down carla server...")
                    self.running_restarts = 0
                    self.clear_server_state()

                # If server down, initialize
                if not self.server_process:
                    self.init_server()

                # Run episode reset
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
                time.sleep(5)
        raise error

    def _build_camera(self, name, post):
        camera = Camera(name, PostProcessing=post)
        camera.set_image_size(
            self.config["render_x_res"], self.config["render_y_res"])
        camera.set_position(x=2.4, y=0, z=0.8)
        camera.set_rotation(pitch=-40, roll=0, yaw=0)
        # camera.FOV = 100
        return camera


    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.episode_index += 1
        self.photo_index = 1
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather,
            QualityLevel=self.config["quality"])
        settings.randomize_seeds()

        # Add the cameras
        if self.config["save_images_rgb"]:
            settings.add_sensor(self._build_camera(name="CameraRGB", post="SceneFinal"))
        settings.add_sensor(self._build_camera(name="CameraDepth", post="Depth"))
        settings.add_sensor(self._build_camera(name="CameraClass", post="SemanticSegmentation"))

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100]
        print(
            "Start pos {} ({}), end {} ({})".format(
                self.scenario["start_pos_id"], self.start_coord,
                self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        py_measurements["control"] = {
            "throttle_brake": 0,
            "steer": 0,
            "throttle": 0,
            "brake": 0,
            "reverse": False,
            "hand_brake": False,
        }
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)


    def encode_obs(self, obs, py_measurements):
        new_obs = obs
        num_frames = int(self.config["framestack"])
        if num_frames > 1:
            # Spread out to number of frames
            frame_array = [obs] * num_frames
            for frame_index in range(1, num_frames):
                index = (self.framestack_index - frame_index) % num_frames
                if self.framestack[index] is not None:
                    frame_array[frame_index] = self.framestack[index]

            # Concatenate into a single np array
            new_obs = np.concatenate(frame_array, axis=2)

        # Store frame
        self.framestack[self.framestack_index] = obs
        self.framestack_index = (self.framestack_index + 1) % num_frames
        self.last_obs = obs

        # Return the stacked observation (a standalone sketch of this ring buffer follows this example).
        return new_obs

    def step(self, action):
        try:
            obs = self._step(action)
            # Store only the observation part so the error path below returns a valid tuple.
            self.last_full_obs = obs[0]
            return obs
        except Exception:
            print(
                "Error during step, terminating episode early",
                traceback.format_exc())
            self.clear_server_state()
            return (self.last_full_obs, 0.0, True, {})

    def _step(self, action):
        action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print(
                "steer", steer, "throttle", throttle, "brake", brake,
                "reverse", reverse)

        self.client.send_control(
            steer=steer, throttle=throttle, brake=brake, hand_brake=hand_brake,
            reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "throttle_brake": action[0],
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }

        # Done?
        finished = self.num_steps > self.scenario["max_steps"]
        # py_measurements["next_command"] == "REACH_GOAL"
        failed = TERM.compute_termination(self, py_measurements, self.prev_measurement)
        done = finished or failed
        py_measurements["finished"] = finished
        py_measurements["failed"] = failed
        py_measurements["done"] = done

        # Determine Reward
        reward = compute_reward(self, self.prev_measurement, py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        self.prev_measurement = py_measurements

        # Callback
        if self.on_step is not None:
            self.on_step(py_measurements)

        # Write out measurements to file
        if self.config["carla_out_path"]:
            # Ensure measurements dir exists
            measurements_dir = os.path.join(self.config["carla_out_path"], self.config["measurements_subdir"])
            if not os.path.exists(measurements_dir):
                os.mkdir(measurements_dir)

            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        measurements_dir,
                        "m_{}.json".format(self.episode_id)),
                    "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video(camera_name="RGB")
                    self.images_to_video(camera_name="Depth")
                    self.images_to_video(camera_name="Class")

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (
            self.encode_obs(image, py_measurements), reward, done,
            py_measurements)

    def preprocess_image(self, image):
        return image

    def _fuse_observations(self, depth, clazz, speed):
        base_shape = (self.config["render_y_res"], self.config["render_x_res"])
        new_shape = (self.config["y_res"], self.config["x_res"])

        # Reduce class
        clazz = reduce_classifications(clazz)

        # Do we need to resize?
        if base_shape != new_shape:
            depth_reshape = depth.reshape(*depth.shape)
            depth = cv2.resize(depth_reshape, (new_shape[1], new_shape[0]))
            clazz = resize_classifications(clazz, new_shape)

        # Fuse with depth!
        obs = fuse_with_depth(clazz, depth, extra_layers=1)

        # Fuse with speed!
        obs[:, :, KEEP_CLASSIFICATIONS] = speed
        return obs, clazz


    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()
        cur = measurements.player_measurements

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        # Fuse the observation data to create a single observation
        observation, clazzes = self._fuse_observations(
            sensor_data['CameraDepth'].data,
            sensor_data['CameraClass'].data,
            cur.forward_speed)

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[
                self.planner.get_next_command(
                    [cur.transform.location.x, cur.transform.location.y,
                     GROUND_Z],
                    [cur.transform.orientation.x, cur.transform.orientation.y,
                     GROUND_Z],
                    [self.end_pos.location.x, self.end_pos.location.y,
                     GROUND_Z],
                    [self.end_pos.orientation.x, self.end_pos.orientation.y,
                     GROUND_Z])
            ]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [cur.transform.orientation.x, cur.transform.orientation.y,
                 GROUND_Z],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z],
                [self.end_pos.orientation.x, self.end_pos.orientation.y,
                 GROUND_Z]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(np.linalg.norm(
            [cur.transform.location.x - self.end_pos.location.x,
             cur.transform.location.y - self.end_pos.location.y]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
            "applied_penalty": False,
            "total_reward": self.total_reward,
        }

        if self.config["carla_out_path"] \
                and self.num_steps % self.config["save_image_frequency"] == 0\
                and self.num_steps > 15:
            self.take_photo(
                sensor_data=sensor_data,
                class_data=clazzes,
                fused_data=observation
            )

        assert observation is not None, sensor_data
        return observation, py_measurements

    def images_dir(self):
        dir = os.path.join(self.config["carla_out_path"], "images")
        if not os.path.exists(dir):
            os.mkdir(dir)
        return dir

    def episode_dir(self):
        episode_dir = os.path.join(self.images_dir(), "episode_" + str(self.episode_index))
        if not os.path.exists(episode_dir):
            os.mkdir(episode_dir)
        return episode_dir

    def take_photo(self, sensor_data, class_data, fused_data):
        # Get the proper locations
        episode_dir = self.episode_dir()
        photo_index = self.photo_index
        name_prefix = "episode_{:>04}_step_{:>04}".format(self.episode_index, photo_index)
        self.photo_index += 1

        # Save the image
        if self.config["save_images_rgb"]:
            rgb_image = sensor_data["CameraRGB"]
            image_path = os.path.join(episode_dir, name_prefix + "_rgb.png")
            scipy.misc.imsave(image_path, rgb_image.data)

        # Save the classes
        if self.config["save_images_class"]:
            classes_path = os.path.join(episode_dir, name_prefix + "_class.png")
            scipy.misc.imsave(classes_path, class_data.data)

        # Save the class images
        if self.config["save_images_fusion"]:
            fused_images = fused_to_rgb(fused_data)
            fused_path = os.path.join(episode_dir, name_prefix + "_fused.png")
            scipy.misc.imsave(fused_path, fused_images.data)

    def images_to_video(self, camera_name):
        # Video directory
        videos_dir = os.path.join(self.config["carla_out_path"], "Videos" + camera_name)
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)

        # Build command
        video_fps = self.config["fps"] / self.config["saveimage_frequency"]
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 10 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(
            x_res=self.config["render_x_res"],
            y_res=self.config["render_y_res"],
            vid=os.path.join(videos_dir, self.episode_id),
            img=os.path.join(self.config["carla_out_path"], "Camera" + camera_name, self.episode_id))

        # Execute command
        print("Executing ffmpeg command: ", ffmpeg_cmd)
        try:
            subprocess.call(ffmpeg_cmd, shell=True, timeout=60)
        except Exception as ex:
            print("FFMPEG EXPIRED")
            print(ex)
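
# A standalone sketch of the frame-stacking scheme used by encode_obs in the
# example above: the most recent frames live in a small ring buffer and are
# concatenated along the channel axis. The class name and shapes below are
# illustrative assumptions, not part of the environment code.
import numpy as np


class FrameStacker:
    def __init__(self, num_frames, frame_shape):
        self.num_frames = num_frames
        # Ring buffer of the most recent frames, initialised with zeros.
        self.buffer = [np.zeros(frame_shape, dtype=np.float32)] * num_frames
        self.index = 0

    def push_and_stack(self, frame):
        # Newest frame first, older frames after it, wrapping around the buffer.
        frames = [frame] + [
            self.buffer[(self.index - i) % self.num_frames]
            for i in range(1, self.num_frames)
        ]
        stacked = np.concatenate(frames, axis=2)
        self.buffer[self.index] = frame
        self.index = (self.index + 1) % self.num_frames
        return stacked


# Usage: with 84x84x3 frames and num_frames=2 the stacked observation is 84x84x6.
# stacker = FrameStacker(num_frames=2, frame_shape=(84, 84, 3))
# obs = stacker.push_and_stack(np.zeros((84, 84, 3), dtype=np.float32))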