Example #1
class Agent(object):
    # Declared abstract at class level; assigning __metaclass__ on self inside
    # __init__ has no effect. In Python 3, subclassing abc.ABC is equivalent.
    __metaclass__ = abc.ABCMeta

    def __init__(self, city_name):
        self._planner = Planner(city_name)

    def get_distance(self, start_point, end_point):
        path_distance = self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 22],
            [start_point.orientation.x, start_point.orientation.y, 22],
            [end_point.location.x, end_point.location.y, 22],
            [end_point.orientation.x, end_point.orientation.y, 22])
        # The episode timeout is calculated based on this distance.
        return path_distance

    @abc.abstractmethod
    def run_step(self, measurements, sensor_data, target):
        """Execute one step of navigation; to be implemented by subclasses."""
Example #2
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        """
        Carla Gym environment class. Creates an OpenAI Gym-compatible driving environment based on
        the CARLA driving simulator.
        :param config: A dictionary with environment configuration keys and values
        """
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        if config["discrete_actions"]:
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2,), dtype=np.float32)
        if config["use_depth_camera"]:
            image_space = Box(
                -1.0, 1.0, shape=(
                    config["y_res"], config["x_res"],
                    1 * config["framestack"]), dtype=np.float32)
        else:
            image_space = Box(
                0.0, 255.0, shape=(
                    config["y_res"], config["x_res"],
                    3 * config["framestack"]), dtype=np.float32)
        if self.config["use_image_only_observations"]:
            self.observation_space = image_space
        else:
            self.observation_space = Tuple(
                [image_space,
                 Discrete(len(COMMANDS_ENUM)),  # next_command
                 Box(-128.0, 128.0, shape=(2,), dtype=np.float32)])  # forward_speed, dist to goal

        self._spec = lambda: None
        self._spec.id = "Carla-v0"
        self._seed = self.config["seed"]

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None
        self.viewer = None

    def render(self, mode=None):
        filename = f'images/id_{self._spec.id}_ep_{self.episode_id}_step_{self.num_steps}'
        self.frame_image.save_to_disk(filename)
        # from gym.envs.classic_control import rendering
        # if self.viewer is None:
        #     self.viewer = rendering.SimpleImageViewer()
        # self.viewer.imshow(self.frame_image)
        # return self.viewer.isopen

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        if self.config["render"]:
            self.server_process = subprocess.Popen(
                [SERVER_BINARY, self.config["server_map"],
                 "-windowed", "-ResX=400", "-ResY=300",
                 "-carla-server",
                 "-carla-world-port={}".format(self.server_port)],
                preexec_fn=os.setsid, stdout=open(os.devnull, "w"))
        else:
            self.server_process = subprocess.Popen(
                ("SDL_VIDEODRIVER=offscreen SDL_HINT_CUDA_DEVICE={} {} " +
                 self.config["server_map"] + " -windowed -ResX=400 -ResY=300"
                 " -carla-server -carla-world-port={}").format(0, SERVER_BINARY, self.server_port),
                shell=True, preexec_fn=os.setsid, stdout=open(os.devnull, "w"))

        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self.reset_env()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def reset_env(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        # If config["scenarios"] is a single scenario, then use it if it's an array of scenarios, randomly choose one and init
        if isinstance(self.config["scenarios"],dict):
            self.scenario = self.config["scenarios"]
        else: #isinstance array of dict
            self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(
                self.config["render_x_res"], self.config["render_y_res"])
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(
            self.config["render_x_res"], self.config["render_y_res"])
        camera2.set_position(0.30, 0, 1.30)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100]
        print(
            "Start pos {} ({}), end {} ({})".format(
                self.scenario["start_pos_id"], self.start_coord,
                self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        if self.config["use_image_only_observations"]:
            obs = image
        else:
            obs = (
                image,
                COMMAND_ORDINAL[py_measurements["next_command"]],
                [py_measurements["forward_speed"],
                 py_measurements["distance_to_goal"]])
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self.step_env(action)
            return obs
        except Exception:
            print(
                "Error during step, terminating episode early",
                traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def step_env(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        throttle = float(np.clip(action[0], 0, 1))
        brake = float(np.abs(np.clip(action[0], -1, 0)))
        steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print(
                "steer", steer, "throttle", throttle, "brake", brake,
                "reverse", reverse)

        self.client.send_control(
            steer=steer, throttle=throttle, brake=brake, hand_brake=hand_brake,
            reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }
        reward = self.calculate_reward(py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        done = (self.num_steps > self.scenario["max_steps"] or
                py_measurements["next_command"] == "REACH_GOAL" or
                (self.config["early_terminate_on_collision"] and
                 check_collision(py_measurements)))
        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (
            self.encode_obs(image, py_measurements), reward, done,
            py_measurements)


    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(
                self.config["render_y_res"], self.config["render_x_res"], 1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        else:
            data = image.data.reshape(
                self.config["render_y_res"], self.config["render_x_res"], 3)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["use_depth_camera"]:
            camera_name = "CameraDepth"
        else:
            camera_name = "CameraRGB"
        for name, image in sensor_data.items():
            if name == camera_name:
                observation = image
        self.frame_image = observation
        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[
                self.planner.get_next_command(
                    [cur.transform.location.x, cur.transform.location.y,
                     GROUND_Z],
                    [cur.transform.orientation.x, cur.transform.orientation.y,
                     GROUND_Z],
                    [self.end_pos.location.x, self.end_pos.location.y,
                     GROUND_Z],
                    [self.end_pos.orientation.x, self.end_pos.orientation.y,
                     GROUND_Z])
            ]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [cur.transform.orientation.x, cur.transform.orientation.y,
                 GROUND_Z],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z],
                [self.end_pos.orientation.x, self.end_pos.orientation.y,
                 GROUND_Z]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(np.linalg.norm(
            [cur.transform.location.x - self.end_pos.location.x,
             cur.transform.location.y - self.end_pos.location.y]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }


        assert observation is not None, sensor_data
        return observation, py_measurements

    def calculate_reward(self, current_measurement):
        """
        Calculate the reward based on the effect of the action taken using the previous and the current measurements
        :param current_measurement: The measurement obtained from the Carla engine after executing the current action
        :return: The scalar reward
        """
        reward = 0.0

        cur_dist = current_measurement["distance_to_goal"]

        prev_dist = self.prev_measurement["distance_to_goal"]

        if self.config["verbose"]:
            print("Cur dist {}, prev dist {}".format(cur_dist, prev_dist))

        # Distance travelled toward the goal in m
        reward += np.clip(prev_dist - cur_dist, -10.0, 10.0)

        # Change in speed (km/hr)
        reward += 0.05 * (current_measurement["forward_speed"] - self.prev_measurement["forward_speed"])

        # New collision damage
        reward -= .00002 * (
            current_measurement["collision_vehicles"] + current_measurement["collision_pedestrians"] +
            current_measurement["collision_other"] - self.prev_measurement["collision_vehicles"] -
            self.prev_measurement["collision_pedestrians"] - self.prev_measurement["collision_other"])

        # New sidewalk intersection
        reward -= 2 * (
            current_measurement["intersection_offroad"] - self.prev_measurement["intersection_offroad"])

        # New opposite lane intersection
        reward -= 2 * (
            current_measurement["intersection_otherlane"] - self.prev_measurement["intersection_otherlane"])

        return reward
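
Because CarlaEnv implements the standard gym reset/step API, a rollout reduces to the usual loop. A minimal sketch, assuming the module-level configuration (ENV_CONFIG, DISCRETE_ACTIONS, SERVER_BINARY, and so on) is defined as in this example and a CARLA server binary is available; the random policy is for illustration only.

env = CarlaEnv()
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()  # random policy, for illustration
    obs, reward, done, info = env.step(action)
print("Episode return:", env.total_reward)
env.clear_server_state()
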
Example #3
class CarlaEnv(object):
    '''
        An OpenAI Gym Environment for CARLA.
    '''
    def __init__(self,
                 obs_converter,
                 action_converter,
                 env_id,
                 random_seed=0,
                 exp_suite_name='TrainingSuite',
                 reward_class_name='RewardCarla',
                 host='127.0.0.1',
                 port=2000,
                 city_name='Town01',
                 subset=None,
                 video_every=100,
                 video_dir='./video/',
                 distance_for_success=2.0,
                 benchmark=False):

        self.logger = get_carla_logger()
        self.logger.info('Environment {} running in port {}'.format(
            env_id, port))
        self.host, self.port = host, port
        self.id = env_id
        self._obs_converter = obs_converter
        self.observation_space = obs_converter.get_observation_space()
        self._action_converter = action_converter
        self.action_space = self._action_converter.get_action_space()
        if benchmark:
            self._experiment_suite = getattr(experiment_suites_benchmark,
                                             exp_suite_name)(city_name)
        else:
            self._experiment_suite = getattr(experiment_suites,
                                             exp_suite_name)(city_name, subset)
        self._reward = getattr(rewards, reward_class_name)()
        self._experiments = self._experiment_suite.get_experiments()
        self.subset = subset
        self._make_carla_client(host, port)
        self._distance_for_success = distance_for_success
        self._planner = Planner(city_name)
        self.done = False
        self.last_obs = None
        self.last_distance_to_goal = None
        self.last_direction = None
        self.last_measurements = None
        np.random.seed(random_seed)
        self.video_every = video_every
        self.video_dir = video_dir
        self.video_writer = None
        self._success = False
        self._failure_timeout = False
        self._failure_collision = False
        self.benchmark = benchmark
        self.benchmark_index = [0, 0, 0]
        try:
            if not os.path.isdir(self.video_dir):
                os.makedirs(self.video_dir)
        except OSError:
            pass
        self.steps = 0
        self.num_episodes = 0

    def step(self, action):

        if self.done:
            raise ValueError(
                'self.done should always be False when calling step')

        while True:

            try:
                # Send control
                control = self._action_converter.action_to_control(
                    action, self.last_measurements)
                self._client.send_control(control)

                # Gather the observations (including measurements, sensor and directions)
                measurements, sensor_data = self._client.read_data()
                self.last_measurements = measurements
                current_timestamp = measurements.game_timestamp
                distance_to_goal = self._get_distance_to_goal(
                    measurements, self._target)
                self.last_distance_to_goal = distance_to_goal
                directions = self._get_directions(
                    measurements.player_measurements.transform, self._target)
                self.last_direction = directions
                obs = self._obs_converter.convert(measurements, sensor_data,
                                                  directions, self._target,
                                                  self.id)

                if self.video_writer is not None and self.steps % 2 == 0:
                    self._raster_frame(sensor_data, measurements, directions,
                                       obs)

                self.last_obs = obs

            except CameraException:
                self.logger.debug('Camera Exception in step()')
                obs = self.last_obs
                distance_to_goal = self.last_distance_to_goal
                current_timestamp = self.last_measurements.game_timestamp

            except TCPConnectionError as e:
                self.logger.debug(
                    'TCPConnectionError inside step(): {}'.format(e))
                self.done = True
                return self.last_obs, 0.0, True, {'carla-reward': 0.0}

            break

        # Check if terminal state
        timeout = (current_timestamp -
                   self._initial_timestamp) > (self._time_out * 1000)
        collision, _ = self._is_collision(measurements)
        success = distance_to_goal < self._distance_for_success
        if timeout:
            self.logger.debug('Timeout')
            self._failure_timeout = True
        if collision:
            self.logger.debug('Collision')
            self._failure_collision = True
        if success:
            self.logger.debug('Success')
        self.done = timeout or collision or success

        # Get the reward
        env_state = {
            'timeout': timeout,
            'collision': collision,
            'success': success
        }
        reward = self._reward.get_reward(measurements, self._target,
                                         self.last_direction, control,
                                         env_state)

        # Additional information
        info = {'carla-reward': reward}

        self.steps += 1

        return obs, reward, self.done, info

    def reset(self):

        # Loop forever due to TCPConnectionErrors
        while True:
            try:
                self._reward.reset_reward()
                self.done = False
                if self.video_writer is not None:
                    try:
                        self.video_writer.close()
                    except Exception as e:
                        self.logger.debug(
                            'Error when closing video writer in reset')
                        self.logger.error(e)
                    self.video_writer = None
                if self.benchmark:
                    end_indicator = self._new_episode_benchmark()
                    if end_indicator is False:
                        return False
                else:
                    self._new_episode()
                # Hack: Try sleeping so that the server is ready. Reduces the number of TCPErrors
                time.sleep(4)
                # measurements, sensor_data = self._client.read_data()
                self._client.send_control(VehicleControl())
                measurements, sensor_data = self._client.read_data()
                self._initial_timestamp = measurements.game_timestamp
                self.last_measurements = measurements
                self.last_distance_to_goal = self._get_distance_to_goal(
                    measurements, self._target)
                directions = self._get_directions(
                    measurements.player_measurements.transform, self._target)
                self.last_direction = directions
                obs = self._obs_converter.convert(measurements, sensor_data,
                                                  directions, self._target,
                                                  self.id)
                self.last_obs = obs
                self.done = False
                self._success = False
                self._failure_timeout = False
                self._failure_collision = False
                return obs

            except CameraException:
                self.logger.debug('Camera Exception in reset()')
                continue

            except TCPConnectionError as e:
                self.logger.debug('TCPConnectionError in reset()')
                self.logger.error(e)
                # Disconnect and reconnect
                self.disconnect()
                time.sleep(5)
                self._make_carla_client(self.host, self.port)

    def disconnect(self):

        if self.video_writer is not None:
            try:
                self.video_writer.close()
            except Exception as e:
                self.logger.debug(
                    'Error when closing video writer in disconnect')
                self.logger.error(e)
            self.video_writer = None

        self._client.disconnect()

    def _raster_frame(self, sensor_data, measurements, directions, obs):

        frame = sensor_data['CameraRGB'].data.copy()
        cv2.putText(frame,
                    text='Episode number: {:,}'.format(self.num_episodes - 1),
                    org=(50, 50),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        cv2.putText(frame,
                    text='Environment steps: {:,}'.format(self.steps),
                    org=(50, 80),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)

        REACH_GOAL = 0.0
        GO_STRAIGHT = 5.0
        TURN_RIGHT = 4.0
        TURN_LEFT = 3.0
        LANE_FOLLOW = 2.0
        if np.isclose(directions, REACH_GOAL):
            dir_str = 'REACH GOAL'
        elif np.isclose(directions, GO_STRAIGHT):
            dir_str = 'GO STRAIGHT'
        elif np.isclose(directions, TURN_RIGHT):
            dir_str = 'TURN RIGHT'
        elif np.isclose(directions, TURN_LEFT):
            dir_str = 'TURN LEFT'
        elif np.isclose(directions, LANE_FOLLOW):
            dir_str = 'LANE FOLLOW'
        else:
            raise ValueError(directions)
        cv2.putText(frame,
                    text='Direction: {}'.format(dir_str),
                    org=(50, 110),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        cv2.putText(frame,
                    text='Speed: {:.02f}'.format(
                        measurements.player_measurements.forward_speed * 3.6),
                    org=(50, 140),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        cv2.putText(frame,
                    text='rel_x: {:.02f}, rel_y: {:.02f}'.format(
                        obs['v'][-2].item(), obs['v'][-1].item()),
                    org=(50, 170),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1.0,
                    color=[0, 0, 0],
                    thickness=2)
        self.video_writer.writeFrame(frame)

    def _get_distance_to_goal(self, measurements, target):

        current_x = measurements.player_measurements.transform.location.x
        current_y = measurements.player_measurements.transform.location.y
        distance_to_goal = np.linalg.norm(
            np.array([current_x, current_y]) -
            np.array([target.location.x, target.location.y]))
        return distance_to_goal

    def _new_episode(self):
        experiment_idx = np.random.randint(0, len(self._experiments))
        experiment = self._experiments[experiment_idx]
        exp_settings = experiment.conditions
        exp_settings.set(QualityLevel='Low')
        positions = self._client.load_settings(exp_settings).player_start_spots
        idx_pose = np.random.randint(0, len(experiment.poses))
        pose = experiment.poses[idx_pose]
        self.logger.info('Env {} gets experiment {} with pose {}'.format(
            self.id, experiment_idx, idx_pose))
        start_index = pose[0]
        end_index = pose[1]
        self._client.start_episode(start_index)
        self._time_out = self._experiment_suite.calculate_time_out(
            self._get_shortest_path(positions[start_index],
                                    positions[end_index]))
        self._target = positions[end_index]
        self._episode_name = str(experiment.Conditions.WeatherId) + '_' \
                            + str(experiment.task) + '_' + str(start_index) \
                            + '_' + str(end_index)

        if ((self.num_episodes % self.video_every) == 0) and (self.id == 0):
            video_path = os.path.join(
                self.video_dir, '{:08d}_'.format(self.num_episodes) +
                self._episode_name + '.mp4')
            self.logger.info('Writing video at {}'.format(video_path))
            self.video_writer = skvideo.io.FFmpegWriter(
                video_path, inputdict={'-r': '30'}, outputdict={'-r': '30'})
        else:
            self.video_writer = None

        self.num_episodes += 1

    def _new_episode_benchmark(self):
        experiment_idx_past = self.benchmark_index[0]
        pose_idx_past = self.benchmark_index[1]
        repetition_idx_past = self.benchmark_index[2]

        experiment_past = self._experiments[experiment_idx_past]
        poses_past = experiment_past.poses[0:]
        repetition_past = experiment_past.repetitions

        if repetition_idx_past == repetition_past:
            if pose_idx_past == len(poses_past) - 1:
                if experiment_idx_past == len(self._experiments) - 1:
                    return False
                else:
                    experiment = self._experiments[experiment_idx_past + 1]
                    pose = experiment.poses[0:][0]
                    self.benchmark_index = [experiment_idx_past + 1, 0, 1]
            else:
                experiment = experiment_past
                pose = poses_past[pose_idx_past + 1]
                self.benchmark_index = [
                    experiment_idx_past, pose_idx_past + 1, 1
                ]
        else:
            experiment = experiment_past
            pose = poses_past[pose_idx_past]
            self.benchmark_index = [
                experiment_idx_past, pose_idx_past, repetition_idx_past + 1
            ]
        exp_settings = experiment.Conditions
        exp_settings.set(QualityLevel='Low')
        positions = self._client.load_settings(exp_settings).player_start_spots
        start_index = pose[0]
        end_index = pose[1]
        self._client.start_episode(start_index)
        self._time_out = self._experiment_suite.calculate_time_out(
            self._get_shortest_path(positions[start_index],
                                    positions[end_index]))
        self._target = positions[end_index]
        self._episode_name = str(experiment.Conditions.WeatherId) + '_' \
                            + str(experiment.task) + '_' + str(start_index) \
                            + '_' + str(end_index)
        if ((self.num_episodes % self.video_every) == 0) and (self.id == 0):
            video_path = os.path.join(
                self.video_dir, '{:08d}_'.format(self.num_episodes) +
                self._episode_name + '.mp4')
            self.logger.info('Writing video at {}'.format(video_path))
            self.video_writer = skvideo.io.FFmpegWriter(
                video_path, inputdict={'-r': '30'}, outputdict={'-r': '30'})
        else:
            self.video_writer = None

        self.num_episodes += 1

    def _get_directions(self, current_point, end_point):

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    @staticmethod
    def _is_collision(measurements):

        c = 0
        c += measurements.player_measurements.collision_vehicles
        c += measurements.player_measurements.collision_pedestrians
        c += measurements.player_measurements.collision_other

        sidewalk_intersection = measurements.player_measurements.intersection_offroad

        otherlane_intersection = measurements.player_measurements.intersection_otherlane

        collided = (c > 1e-9 or sidewalk_intersection > 0.01
                    or otherlane_intersection > 0.9)
        return collided, c

    def _make_carla_client(self, host, port):

        while True:
            try:
                self.logger.info(
                    "Trying to make client on port {}".format(port))
                self._client = CarlaClient(host, port, timeout=100)
                self._client.connect()
                self._client.load_settings(CarlaSettings(QualityLevel='Low'))
                self._client.start_episode(0)
                self.logger.info(
                    "Successfully made client on port {}".format(port))
                break
            except TCPConnectionError as error:
                self.logger.debug('Got TCPConnectionError..sleeping for 1')
                self.logger.error(error)
                time.sleep(1)
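
This variant delegates observation and action encoding to converter objects that are not part of this listing, so constructing it needs those two helpers plus a reachable CARLA server. A hedged usage sketch follows; ObsConverter and ActionConverter are hypothetical stand-ins for whatever converters the surrounding project provides (they must expose get_observation_space()/convert() and get_action_space()/action_to_control(), as used above).

obs_conv = ObsConverter()      # hypothetical converter object
act_conv = ActionConverter()   # hypothetical converter object
env = CarlaEnv(obs_conv, act_conv, env_id=0, port=2000, city_name='Town01')
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
env.disconnect()
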
Example #4
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        if config["discrete_actions"]:
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ), dtype=np.float32)
        if config["use_depth_camera"]:
            image_space = Box(-1.0,
                              1.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        else:
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     3 * config["framestack"]),
                              dtype=np.uint8)
        self.observation_space = Tuple(  # forward_speed, dist to goal
            [
                image_space,
                Discrete(len(COMMANDS_ENUM)),  # next_command
                Box(-128.0, 128.0, shape=(2, ), dtype=np.float32)
            ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY, self.config["server_map"], "-windowed",
                "-ResX=800", "-ResY=600", "-carla-server", "-benchmark",
                "-fps=10", "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=True,
                     NumberOfVehicles=self.scenario["num_vehicles"],
                     NumberOfPedestrians=self.scenario["num_pedestrians"],
                     WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            camera1.set_position(0.1, 0, 1.7)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set_position(0.1, 0, 1.7)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
            py_measurements["forward_speed"],
            py_measurements["distance_to_goal"]
        ])
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def _step(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(steer=steer,
                                 throttle=throttle,
                                 brake=brake,
                                 hand_brake=hand_brake,
                                 reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }
        reward = compute_reward(self, self.prev_measurement, py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        done = (self.num_steps > self.scenario["max_steps"]
                or py_measurements["next_command"] == "REACH_GOAL"
                or (self.config["early_terminate_on_collision"]
                    and collided_done(py_measurements)))
        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(x_res=self.config["render_x_res"],
                 y_res=self.config["render_y_res"],
                 vid=os.path.join(videos_dir, self.episode_id),
                 img=os.path.join(CARLA_OUT_PATH, "CameraRGB",
                                  self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        else:
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["use_depth_camera"]:
            camera_name = "CameraDepth"
        else:
            camera_name = "CameraRGB"
        for name, image in sensor_data.items():
            if name == camera_name:
                observation = image

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance([
                cur.transform.location.x, cur.transform.location.y, GROUND_Z
            ], [
                cur.transform.orientation.x, cur.transform.orientation.y,
                GROUND_Z
            ], [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                self.end_pos.orientation.x, self.end_pos.orientation.y,
                GROUND_Z
            ]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command":
            next_command,  # TODO can we figure some way using this command
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file, image.data)

        assert observation is not None, sensor_data
        return observation, py_measurements
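
Example #4 delegates its reward to an external compute_reward(env, prev, current) helper that is not included in this listing. One plausible shape for it, mirroring the reward terms of Example #2's calculate_reward, is sketched below; the exact signature and weights are assumptions rather than the project's actual implementation.

def compute_reward(env, prev, current):
    # Progress toward the goal in metres, clipped as in Example #2.
    reward = float(np.clip(prev["distance_to_goal"] - current["distance_to_goal"], -10.0, 10.0))
    # Change in forward speed.
    reward += 0.05 * (current["forward_speed"] - prev["forward_speed"])
    # New collision damage accumulated since the previous step.
    reward -= 0.00002 * (
        current["collision_vehicles"] + current["collision_pedestrians"] +
        current["collision_other"] - prev["collision_vehicles"] -
        prev["collision_pedestrians"] - prev["collision_other"])
    # New sidewalk and opposite-lane intersection.
    reward -= 2 * (current["intersection_offroad"] - prev["intersection_offroad"])
    reward -= 2 * (current["intersection_otherlane"] - prev["intersection_otherlane"])
    return reward
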
Example #5
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        if config["discrete_actions"]:
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ), dtype=np.float32)
        if config["use_depth_camera"]:
            image_space = Box(
                -1.0,
                1.0,
                shape=(config["y_res"], config["x_res"],
                       1 * config["framestack"]),
                dtype=np.float32)
        else:
            image_space = Box(
                0,
                255,
                shape=(config["y_res"], config["x_res"],
                       3 * config["framestack"]),
                dtype=np.uint8)
        self.observation_space = Tuple(  # forward_speed, dist to goal
            [
                image_space,
                Discrete(len(COMMANDS_ENUM)),  # next_command
                Box(-128.0, 128.0, shape=(2, ), dtype=np.float32)
            ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY, self.config["server_map"], "-windowed",
                "-ResX=400", "-ResY=300", "-carla-server",
                "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            camera1.set_position(30, 0, 130)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set_position(30, 0, 130)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
            py_measurements["forward_speed"],
            py_measurements["distance_to_goal"]
        ])
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def _step(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))
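        # Worked example: action = [0.5, -0.2] maps directly to throttle 0.5,
        # brake 0.0, steer -0.2; with squash_action_logits enabled the raw
        # logits are squashed first, e.g. action = [2.0, 0.0] gives
        # forward = 2 * (sigmoid(2.0) - 0.5) ~= 0.76, so throttle ~= 0.76,
        # brake = 0.0 and steer = 0.0.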
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(
            steer=steer,
            throttle=throttle,
            brake=brake,
            hand_brake=hand_brake,
            reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }
        reward = compute_reward(self, self.prev_measurement, py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        done = (self.num_steps > self.scenario["max_steps"]
                or py_measurements["next_command"] == "REACH_GOAL"
                or (self.config["early_terminate_on_collision"]
                    and collided_done(py_measurements)))
        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(
            x_res=self.config["render_x_res"],
            y_res=self.config["render_y_res"],
            vid=os.path.join(videos_dir, self.episode_id),
            img=os.path.join(CARLA_OUT_PATH, "CameraRGB", self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        else:
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
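            # Maps uint8 pixel values in [0, 255] to roughly [-1, 1).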
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["use_depth_camera"]:
            camera_name = "CameraDepth"
        else:
            camera_name = "CameraRGB"
        for name, image in sensor_data.items():
            if name == camera_name:
                observation = image

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance([
                cur.transform.location.x, cur.transform.location.y, GROUND_Z
            ], [
                cur.transform.orientation.x, cur.transform.orientation.y,
                GROUND_Z
            ], [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                self.end_pos.orientation.x, self.end_pos.orientation.y,
                GROUND_Z
            ]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]) / 100)

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file, image.data)

        assert observation is not None, sensor_data
        return observation, py_measurements
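
A minimal driver loop for the environment above, as a sketch: it assumes the CarlaEnv class and ENV_CONFIG from this example are in scope, and simply samples random actions to exercise reset() and step().

env = CarlaEnv(ENV_CONFIG)
obs = env.reset()
done, episode_reward = False, 0.0
while not done:
    action = env.action_space.sample()          # random action, just to exercise the API
    obs, reward, done, info = env.step(action)
    episode_reward += reward
print("episode finished, total reward:", episode_reward)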
Example No. 6
class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with a given experiment Suite.


    The benchmark class must be subclassed by a class that defines all
    the experiments to be run by the agent.
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point at which the episode
        # is considered a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it later
        self._recording = Recording(dir_to_save=city_name,
                                    name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        self._episode_number = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark.
        If it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run_step method implemented.
            client: a CARLA client object used to communicate with the simulator.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and experiment from which to resume this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    self._episode_number += 1
                    # Print information on this episode
                    logging.info('======== !!!! ==========')
                    logging.info('Episode Number: %d', self._episode_number)
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])
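                    # (sldist computes the straight-line Euclidean distance
                    # between the two positions.)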

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level commands) to reach a certain goal.
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest-path distance between two points over the road network.
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        while (current_timestamp - initial_timestamp) < (time_out *
                                                         1000) and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # The agent processes the data.
            control = agent.run_step(measurements, sensor_data, directions,
                                     target)
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                         float(distance), current_x, current_y,
                         target.location.x, target.location.y)
            # Check if the target has been reached
            if distance < self._distance_for_success:
                success = True

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance
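
A sketch of how this benchmark class is typically driven. ForwardAgent and MyExperimentSuite are placeholders for an Agent subclass implementing run_step and for an experiment suite exposing the attributes used above (get_experiments, get_number_of_poses_task, calculate_time_out, metrics_parameters, dynamic_tasks); make_carla_client is the CARLA 0.8.x client context manager.

from carla.client import make_carla_client

agent = ForwardAgent()                # placeholder agent implementation
suite = MyExperimentSuite('Town01')   # placeholder experiment suite
benchmark = DrivingBenchmark(city_name='Town01', name_to_save='forward_test')

with make_carla_client('localhost', 2000) as client:
    results = benchmark.benchmark_agent(suite, agent, client)
print(results)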
Example No. 7
class DrivingBenchmark(object):
    """
    The Benchmark class controls the execution of the benchmark, interfacing
    an Agent class with a given experiment Suite.


    The benchmark class must be subclassed by a class that defines all
    the experiments to be run by the agent.
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point at which the episode
        # is considered a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it later
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first checks the log file for this benchmark.
        If it exists, it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run_step method implemented.
            client: a CARLA client object used to communicate with the simulator.


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        data = OrderedDict()
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Get the pose and experiment from which to resume this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')
        cols = [
            'Start_position', 'End_position', 'Total-Distance', 'Time',
            "Distance-Travelled", "Follow_lane", "Straight", "Left", "Right",
            "Avg-Speed", "Collision-Pedestrian", "Collision-Vehicle",
            "Collision-Other", "Intersection-Lane", "Intersection-Offroad",
            "Traffic_Light_Infraction", "Success"
        ]
        df = pd.DataFrame(columns=cols)
        ts = str(int(time.time()))

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                data['Start_position'] = pose[0]
                data['End_position'] = pose[1]

                #print(df)

                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on this episode
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])
                    data["Total-Distance"] = initial_distance
                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, data) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index) , data)
                    data["Time"] = final_time
                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    data['Success'] = result
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)

                    else:
                        logging.info('----- Timeout! -----')
                #data['Start_position'] = start_index
                #data['End_position'] = end_index
                #df = df.append(data , ignore_index=True)
                #print(df)
                data["Avg-Speed"] = 3.6 * (data['Total-Distance'] /
                                           data['Time'])
                df = df.append(data, ignore_index=True)
                df = df[cols]
                try:
                    df.to_csv("Test_result_" + ts + '.csv',
                              columns=cols,
                              index=True)
                except Exception:
                    print("CSV file is currently in use; it will be saved later")
            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the directions (high-level commands) to reach a certain goal.
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest-path distance between two points over the road network.
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, data):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
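            data: an ordered dict accumulating the per-pose statistics
                  written to the CSV summary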

        """

        # Send an initial command.
        direction2text = {
            0: 'Follow_lane',
            1: 'Follow_lane',
            2: 'Follow_lane',
            3: 'Left',
            4: 'Right',
            5: 'Straight'
        }
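        # direction2text maps the planner's integer commands onto the CSV
        # column names; codes 0-2 are all treated as lane following.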
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False
        # Initialize lane and Offroad fields
        data["Intersection-Lane"] = 0
        data["Intersection-Offroad"] = 0
        data["Directions"] = []
        agent.command_follower.controller.params[
            'target_speed'] = agent.command_follower.controller.params[
                'default_speed_limit']
        agent.traffic_light_infraction = False
        for x in range(6):
            data[direction2text[x]] = 0
        data["Collision-Pedestrian"] = 0
        data["Collision-Vehicle"] = 0
        data["Collision-Other"] = 0
        while True:  #

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # The agent processes the data.
            if data[direction2text[directions]] == 0:
                data[direction2text[directions]] = 1
            control, controller_state = agent.run_step(measurements,
                                                       sensor_data, directions,
                                                       target)
            # Send the control commands to the vehicle
            client.send_control(control)
            if measurements.player_measurements.intersection_otherlane > data["Intersection-Lane"]:
                data["Intersection-Lane"] = measurements.player_measurements.intersection_otherlane
            if measurements.player_measurements.intersection_offroad > data["Intersection-Offroad"]:
                data["Intersection-Offroad"] = measurements.player_measurements.intersection_offroad
            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y
            # If the vehicle has stopped for a pedestrian, another vehicle or a
            # traffic light, extend the timeout by the time elapsed on this frame
            if min(controller_state['stop_pedestrian'],
                   controller_state['stop_vehicle'],
                   controller_state['stop_traffic_lights']) <= 0.3:
                time_out += (measurements.game_timestamp -
                             current_timestamp) / 1000
            #logging.info("Controller sis Inputting:")
            #logging.info('Steer = %f Throttle = %f Brake = %f ',
            #             control.steer, control.throttle, control.brake)
            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            #logging.info('Status:')
            print(
                f"Distance to target: {float(distance):.2f} \t Time_taken: {(current_timestamp -initial_timestamp)/1000 :.2f}\t Timeout : {time_out :.2f}"
            )
            # Check if the target has been reached
            data["Distance-Travelled"] = data["Total-Distance"] - distance
            if distance < self._distance_for_success:
                success = True
                break
            if (current_timestamp - initial_timestamp) / 1000 > time_out:
                data["Intersection-Offroad"] = 0
                data["Intersection-Lane"] = 0
                data["Collision-Pedestrian"] = 0
                data["Collision-Vehicle"] = 0
                data["Collision-Other"] = 0
                success = False
                print("Timeout")
                break

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

            # Stop in case of collision or excessive lane infraction
            if measurements.player_measurements.collision_pedestrians >300 or \
                measurements.player_measurements.collision_vehicles > 300 or \
                measurements.player_measurements.collision_other>300:
                if measurements.player_measurements.collision_pedestrians > 300:
                    data["Collision-Pedestrian"] = 1
                elif measurements.player_measurements.collision_vehicles > 300:
                    data["Collision-Vehicle"] = 1
                elif measurements.player_measurements.collision_other > 300:
                    data["Collision-Other"] = 1
                data["Intersection-Offroad"] = 0
                data["Intersection-Lane"] = 0
                print("Collison detected")
                success = False
                break

            if data["Intersection-Lane"] > 0.5:
                data["Intersection-Lane"] = 1
                data["Intersection-Offroad"] = 0
                data["Collision-Pedestrian"] = 0
                data["Collision-Vehicle"] = 0
                data["Collision-Other"] = 0
                print("Lane Infraction")
                success = False
                break
            elif data["Intersection-Offroad"] > 0.5:
                data["Intersection-Offroad"] = 1
                data["Intersection-Lane"] = 0
                data["Collision-Pedestrian"] = 0
                data["Collision-Vehicle"] = 0
                data["Collision-Other"] = 0
                print("Lane Infraction")
                success = False
                break
        if success:
            data["Intersection-Offroad"] = 0
            data["Intersection-Lane"] = 0
            data["Collision-Pedestrian"] = 0
            data["Collision-Vehicle"] = 0
            data["Collision-Other"] = 0
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance, data
        return 0, measurement_vec, control_vec, time_out, distance, data
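
The per-pose results accumulated above are written to "Test_result_<timestamp>.csv". A short sketch of summarizing that file afterwards, assuming pandas is available and using the column names defined in benchmark_agent (the timestamp in the filename is a placeholder):

import pandas as pd

df = pd.read_csv("Test_result_1550000000.csv", index_col=0)  # placeholder timestamp
print("poses attempted:  ", len(df))
print("success rate:     ", df["Success"].mean())
print("mean speed (km/h):", df["Avg-Speed"].mean())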
Example No. 8
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        # The Action Space
        if config["discrete_actions"]:
            # The discrete action index is mapped to a continuous 2D action.
            self.action_space = Discrete(len(DISCRETE_ACTIONS))
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ),
                                    dtype=np.float32)  # 2D action.

        if config["use_depth_camera"]:
            image_space = Box(-1.0,
                              1.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        else:
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     3 * config["framestack"]),
                              dtype=np.uint8)
        # encode_measure ---> rgb + depth + pre_rgb + measurement_encode
        # 3 + 1 + 3 + 1 = 8
        if config["encode_measurement"]:
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"], 8),
                              dtype=np.float32)

        # The Observation Space
        self.observation_space = Tuple([
            image_space,
            Discrete(len(COMMANDS_ENUM)),  # next_command
            Box(-128.0, 128.0, shape=(2, ),
                dtype=np.float32)  # forward_speed, dist to goal
        ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY,
                self.config["server_map"],
                "-windowed",
                "-ResX=800",
                "-ResY=600",
                "-carla-server",
                "-benchmark -fps=10",  #: to run the simulation at a fixed time-step of 0.1 seconds
                "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        # __del__ is called when the instance is garbage-collected (its memory is freed).
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 170)
            camera1.set_position(0.5, 0.0, 1.6)
            # camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        if self.config["encode_measurement"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 170)
            camera1.set_position(0.5, 0.0, 1.6)
            camera1.set(FOV=120)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set(FOV=120)
        camera2.set_position(0.5, 0.0, 1.6)
        settings.add_sensor(camera2)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x, self.start_pos.location.y
        ]
        self.end_coord = [self.end_pos.location.x, self.end_pos.location.y]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], [int(x) for x in self.start_coord],
            self.scenario["end_pos_id"], [int(x) for x in self.end_coord]))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    # rgb + depth + forward_speed + next_command
    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
        if self.config["encode_measurement"]:
            feature_map = np.zeros([4, 4])
            feature_map[1, :] = (py_measurements["forward_speed"] - 15) / 15
            feature_map[2, :] = (
                COMMAND_ORDINAL[py_measurements["next_command"]] - 2) / 2
            feature_map[0, 0] = py_measurements["x_orient"]
            feature_map[0, 1] = py_measurements["y_orient"]
            feature_map[0, 2] = (py_measurements["distance_to_goal"] - 170) / 170
            feature_map[0, 3] = (py_measurements["distance_to_goal_euclidean"] -
                                 170) / 170
            feature_map[3, 0] = (py_measurements["x"] - 50) / 150
            feature_map[3, 1] = (py_measurements["y"] - 50) / 150
            feature_map[3, 2] = (py_measurements["end_coord"][0] - 150) / 150
            feature_map[3, 3] = (py_measurements["end_coord"][1] - 150) / 150
            feature_map = np.tile(feature_map, (24, 24))
            image = np.concatenate(
                [prev_image[:, :, 0:3], image, feature_map[:, :, np.newaxis]],
                axis=2)
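            # With framestack == 1 the channel layout after this concatenation
            # is: 0-2 previous RGB, 3-5 current RGB, 6 current depth,
            # 7 tiled measurement feature map (3 + 3 + 1 + 1 = 8 channels).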
            # print("image shape after encoding", image.shape)

        obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
            py_measurements["forward_speed"],
            py_measurements["distance_to_goal"]
        ])
        self.last_obs = obs
        # print('distance to goal', py_measurements["distance_to_goal"])
        # print("speed", py_measurements["forward_speed"])
        return obs

    # Example of a py_measurements dict:
    # {'episode_id': '2019-02-22_11-26-36_990083',
    #  'step': 0,
    #  'x': 71.53003692626953,
    #  'y': 302.57000732421875,
    #  'x_orient': -1.0,
    #  'y_orient': -6.4373016357421875e-06,
    #  'forward_speed': -3.7578740032934155e-13,
    #  'distance_to_goal': 0.6572,
    #  'distance_to_goal_euclidean': 0.6200001239776611,
    #  'collision_vehicles': 0.0,
    #  'collision_pedestrians': 0.0,
    #  'collision_other': 0.0,
    #  'intersection_offroad': 0.0,
    #  'intersection_otherlane': 0.0,
    #  'weather': 0,
    #  'map': '/Game/Maps/Town02',
    #  'start_coord': [0.0, 3.0],
    #  'end_coord': [0.0, 3.0],
    #  'current_scenario': {'city': 'Town02',
    #                       'num_vehicles': 0,
    #                       'num_pedestrians': 20,
    #                       'weather_distribution': [0],
    #                       'start_pos_id': 36,
    #                       'end_pos_id': 40,
    #                       'max_steps': 600},
    #  'x_res': 10,
    #  'y_res': 10,
    #  'num_vehicles': 0,
    #  'num_pedestrians': 20,
    #  'max_steps': 600,
    #  'next_command': 'GO_STRAIGHT',
    #  'action': (0, 1),
    #  'control': {'steer': 1.0,
    #              'throttle': 0.0,
    #              'brake': 0.0,
    #              'reverse': False,
    #              'hand_brake': False},
    #  'reward': 0.0,
    #  'total_reward': 0.0,
    #  'done': False}

    def step(self, action):
        try:
            obs = self._step(action)
            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True, {})

    def _step(self, action):
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]  # Carla action is 2D.
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))

        # reverse and hand_brake are disabled.
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(steer=steer,
                                 throttle=throttle,
                                 brake=brake,
                                 hand_brake=hand_brake,
                                 reverse=reverse)

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }

        # compute reward
        reward = compute_reward(self, self.prev_measurement, py_measurements)

        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward

        # done or not
        done = (self.num_steps > self.scenario["max_steps"]
                or py_measurements["next_command"] == "REACH_GOAL"
                or (self.config["early_terminate_on_collision"]
                    and collided_done(py_measurements)))

        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if self.config["verbose"] and CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (self.encode_obs(image, py_measurements), reward, done,
                py_measurements)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(x_res=self.config["render_x_res"],
                 y_res=self.config["render_y_res"],
                 vid=os.path.join(videos_dir, self.episode_id),
                 img=os.path.join(CARLA_OUT_PATH, "CameraRGB",
                                  self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
        if self.config["use_depth_camera"]:
            assert self.config["use_depth_camera"]
            data = (image.data - 0.5) * 2
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = np.expand_dims(data, 2)
        elif self.config["encode_measurement"]:
            # data = (image - 0.5) * 2
            data = image.reshape(self.config["render_y_res"],
                                 self.config["render_x_res"], -1)
            #data[:, :, 4] = (data[:, :, 0] - 0.5) * 2
            #data[:, :, 0:3] = (data[:, :, 0:2].astype(np.float32) - 128) / 128
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)

        else:
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = (data.astype(np.float32) - 128) / 128
        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None
        if self.config["encode_measurement"]:
            # print(sensor_data["CameraRGB"].data.shape, sensor_data["CameraDepth"].data.shape)
            observation = np.concatenate(
                ((sensor_data["CameraRGB"].data.astype(np.float32) - 128) /
                 128,
                 (sensor_data["CameraDepth"].data[:, :, np.newaxis] - 0.5) *
                 0.5),
                axis=2)

            # print("observation_shape", observation.shape)
        else:
            if self.config["use_depth_camera"]:
                camera_name = "CameraDepth"

            else:
                camera_name = "CameraRGB"
            for name, image in sensor_data.items():
                if name == camera_name:
                    observation = image

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])
        # In CARLA 0.8.x this distance should already be in meters.
        # TODO: run experiments to verify the distance scale before settling on the reward function.
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]))

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file, image.data)

        assert observation is not None, sensor_data
        return observation, py_measurements
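The TODO above leaves the reward design open. As a purely illustrative sketch (the helper name and coefficients are hypothetical, not this repository's compute_reward), a shaped reward could be derived from two consecutive py_measurements dicts along these lines:

def sketch_reward(prev_m, cur_m):
    """Illustrative reward from two consecutive py_measurements dicts (not the original code)."""
    # Prefer the planner distance when available, otherwise fall back to the Euclidean one.
    prev_d = prev_m["distance_to_goal"] if prev_m["distance_to_goal"] >= 0 else prev_m["distance_to_goal_euclidean"]
    cur_d = cur_m["distance_to_goal"] if cur_m["distance_to_goal"] >= 0 else cur_m["distance_to_goal_euclidean"]
    reward = 0.1 * (prev_d - cur_d)              # progress towards the goal
    reward += 0.05 * cur_m["forward_speed"]      # small speed bonus
    for key in ("collision_vehicles", "collision_pedestrians", "collision_other"):
        reward -= 0.0001 * max(cur_m[key] - prev_m[key], 0)   # penalize newly accumulated collision intensity
    reward -= 2.0 * (cur_m["intersection_offroad"] + cur_m["intersection_otherlane"])
    return reward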
Example No. 9
0
class CarlaEnv(gym.Env):
    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.config["x_res"], self.config["y_res"] = IMG_SIZE
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        # The Action Space
        if config["discrete_actions"]:
            self.action_space = Discrete(
                len(DISCRETE_ACTIONS
                    ))  # It will be transformed to continuous 2D action.
        else:
            self.action_space = Box(-1.0, 1.0, shape=(2, ),
                                    dtype=np.float32)  # 2D action.

        if config["use_sensor"] == 'use_semantic':
            image_space = Box(0.0,
                              255.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        elif config["use_sensor"] in ['use_depth', 'use_logdepth']:
            image_space = Box(0.0,
                              255.0,
                              shape=(config["y_res"], config["x_res"],
                                     1 * config["framestack"]),
                              dtype=np.float32)
        elif config["use_sensor"] == 'use_rgb':
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     NUM_CHANNELS),
                              dtype=np.uint8)
        elif config["use_sensor"] == 'use_2rgb':
            image_space = Box(0,
                              255,
                              shape=(config["y_res"], config["x_res"],
                                     2 * 3 * config["framestack"]),
                              dtype=np.uint8)

        # The Observation Space
        self.observation_space = Tuple([
            image_space,
            Discrete(len(COMMANDS_ENUM)),  # next_command
            Box(-128.0, 128.0, shape=(2, ),
                dtype=np.float32)  # forward_speed, dist to goal
        ])

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None
        self._global_step = 0

    def init_server(self):
        print("Initializing new Carla server...")
        self._global_step = 0
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [
                SERVER_BINARY,
                self.config["server_map"],
                "-windowed",
                "-ResX=480",
                "-ResY=360",
                "-carla-server",
                "-benchmark -fps=10",  # "-benchmark -fps=10": to run the simulation at a fixed time-step of 0.1 seconds
                "-carla-world-port={}".format(self.server_port)
            ],
            preexec_fn=os.setsid,
            stdout=subprocess.DEVNULL  # avoids the ResourceWarning from an unclosed /dev/null file handle
        )
        # Track the server's process group so it can be killed on cleanup/exit.
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
            pass
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        # Called when the instance is garbage-collected; ensure the server process is cleaned up.
        self.clear_server_state()

    def reset(self):
        error = None
        for _ in range(RETRIES_ON_ERROR):
            try:
                if not self.server_process:
                    self.init_server()
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
        raise error

    # @set_timeout(15)
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self._global_step = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_sensor"] == 'use_semantic':
            camera0 = Camera("CameraSemantic",
                             PostProcessing="SemanticSegmentation")
            camera0.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera0.set_position(30, 0, 130)
            camera0.set(FOV=120)
            camera0.set_position(2.0, 0.0, 1.4)
            camera0.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera0)

        if self.config["use_sensor"] in ['use_depth', 'use_logdepth']:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set(FOV=120)
            camera1.set_position(2.0, 0.0, 1.4)
            camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        if self.config["use_sensor"] == 'use_rgb':
            camera2 = Camera("CameraRGB")
            camera2.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera2.set_position(30, 0, 130)
            # camera2.set_position(0.3, 0.0, 1.3)
            camera2.set(FOV=120)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera2)

        if self.config["use_sensor"] == 'use_2rgb':
            camera_l = Camera("CameraRGB_L")
            camera_l.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_l.set(FOV=120)
            camera_l.set_position(2.0, -0.1, 1.4)
            camera_l.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_l)

            camera_r = Camera("CameraRGB_R")
            camera_r.set_image_size(self.config["render_x_res"],
                                    self.config["render_y_res"])
            camera_r.set(FOV=120)
            camera_r.set_position(2.0, 0.1, 1.4)
            camera_r.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera_r)

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x, self.start_pos.location.y
        ]
        self.end_coord = [self.end_pos.location.x, self.end_pos.location.y]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Wait out the initial vehicle drop at spawn: keep sending zero controls until
        # the vehicle's z coordinate stops changing for a couple of consecutive frames.
        cnt = 1
        z1 = 0
        zero_control = VehicleControl()  # defaults: steer=0, throttle=0, brake=0, reverse=False
        while cnt < 3:
            self.client.send_control(zero_control)
            z0 = z1
            z1 = self.client.read_data()[0].player_measurements.transform.location.z
            print(z1)
            if z0 - z1 == 0:
                cnt += 1
        print('Starting new episode done.\n')

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, image, py_measurements):
        assert self.config["framestack"] in [1, 2]
        prev_image = self.prev_image
        self.prev_image = image
        if prev_image is None:
            prev_image = image
        if self.config["framestack"] == 2:
            image = np.concatenate([prev_image, image], axis=2)
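        # When ENCODE is set, the high-level planner command is appended below as an extra
        # constant-valued image channel (ordinal * 50) so the policy can condition on it.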
        if ENCODE:
            image = np.concatenate([
                image, COMMAND_ORDINAL[py_measurements["next_command"]] * 50 *
                np.ones(list(image.shape[:2]) + [1])
            ],
                                   axis=2)
        # obs = (image, COMMAND_ORDINAL[py_measurements["next_command"]], [
        #     py_measurements["forward_speed"],
        #     py_measurements["distance_to_goal"]
        # ])
        obs = image
        self.last_obs = obs
        return obs

    def step(self, action):
        try:
            obs = self._step(action)

            return obs
        except Exception:
            print("Error during step, terminating episode early",
                  traceback.format_exc())
            self.clear_server_state()
            return (self.last_obs, 0.0, True,
                    np.array(1, dtype=np.float32))

    # Data flow: self._read_observation() -> (image, py_measurements); the image is then
    # passed through self.preprocess_image() before being returned as the step observation.
    # @set_timeout(10)
    def _step(self, action):
        self._global_step += 1
        # print(self._global_step)
        if self.config["discrete_actions"]:
            action = DISCRETE_ACTIONS[int(action)]  # Carla action is 2D.
        assert len(action) == 2, "Invalid action {}".format(action)
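        # Interpret the 2D action: either squash raw logits through a sigmoid or clip them
        # directly; action[0] maps to throttle/brake and action[1] maps to steering.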
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))

        # reverse and hand_brake are disabled.
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print("steer", steer, "throttle", throttle, "brake", brake,
                  "reverse", reverse)

        self.client.send_control(steer=steer,
                                 throttle=throttle,
                                 brake=brake,
                                 hand_brake=hand_brake,
                                 reverse=reverse)

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if type(action) is np.ndarray:
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }

        # compute reward
        reward = compute_reward(self, self.prev_measurement, py_measurements)

        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward

        # Episode termination: max steps exceeded, goal reached, or (optionally) a collision;
        # the check is only active after frame 51 so the spawn-settling phase never ends an
        # episode early.
        done = ((self.num_steps > self.scenario["max_steps"]
                 or py_measurements["next_command"] == "REACH_GOAL" or
                 (self.config["early_terminate_on_collision"]
                  and collided_done(py_measurements)))
                and self._global_step > 51)

        py_measurements["done"] = done
        self.prev_measurement = py_measurements

        # Write out measurements to file
        if self.config["verbose"] and CARLA_OUT_PATH:
            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        CARLA_OUT_PATH,
                        "measurements_{}.json".format(self.episode_id)), "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video()

        self.num_steps += 1
        image = self.preprocess_image(image)
        info = np.array(COMMAND_ORDINAL[py_measurements["next_command"]],
                        dtype=np.float32)

        # print(self.encode_obs(image, py_measurements)[0].shape)
        return (self.encode_obs(image, py_measurements), reward, done, info)

    def images_to_video(self):
        videos_dir = os.path.join(CARLA_OUT_PATH, "Videos")
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r 60 -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(x_res=self.config["render_x_res"],
                 y_res=self.config["render_y_res"],
                 vid=os.path.join(videos_dir, self.episode_id),
                 img=os.path.join(CARLA_OUT_PATH, "CameraRGB",
                                  self.episode_id))
        print("Executing ffmpeg command", ffmpeg_cmd)
        subprocess.call(ffmpeg_cmd, shell=True)

    def preprocess_image(self, image):
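        # Resize from the render resolution down to (x_res, y_res) and normalize the
        # channel layout according to which sensor is configured.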

        if self.config["use_sensor"] == 'use_semantic':
            # image.data holds uint8 semantic class ids (0 ~ 12); scale into the uint8 range.
            data = image.data * 21  # data: uint8 (0 ~ 252)
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"], 1)
            data = cv2.resize(data,
                              (self.config["x_res"], self.config["y_res"]),
                              interpolation=cv2.INTER_AREA)
            data = np.expand_dims(
                data, 2)  # data: uint8(0 ~ 255),  shape(y_res, x_res, 1)

        elif self.config["use_sensor"] == 'use_depth':  # depth: float64(0 ~ 1)
            # data = (image.data - 0.5) * 2
            data = image.data * 255  # data: float64(0 ~ 255)
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"],
                                1)  # shape(render_y_res,render_x_res,1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)  # shape(y_res, x_res)
            data = np.expand_dims(
                data, 2)  # data: float64(0 ~ 255),  shape(y_res, x_res, 1)

        elif self.config["use_sensor"] == 'use_logdepth':  # depth: float64 (0 ~ 1)
            data = (np.log(image.data) +
                    7.0) * 255.0 / 7.0  # data: float64(0 ~ 255)
            data = data.reshape(self.config["render_y_res"],
                                self.config["render_x_res"],
                                1)  # shape(render_y_res,render_x_res,1)
            data = cv2.resize(
                data, (self.config["x_res"], self.config["y_res"]),
                interpolation=cv2.INTER_AREA)  # shape(y_res, x_res)
            data = np.expand_dims(
                data, 2)  # data: float64(0 ~ 255),  shape(y_res, x_res, 1)

        elif self.config["use_sensor"] == 'use_rgb':
            data = image.data.reshape(self.config["render_y_res"],
                                      self.config["render_x_res"], 3)
            data = cv2.resize(
                data,
                (self.config["x_res"], self.config["y_res"]
                 ),  # data: uint8(0 ~ 255),  shape(y_res, x_res, 3)
                interpolation=cv2.INTER_AREA)
            # data = (data.astype(np.float32) - 128) / 128

        elif self.config["use_sensor"] == 'use_2rgb':
            data_l, data_r = image[0].data, image[1].data  # left and right RGB cameras
            data_l = data_l.reshape(self.config["render_y_res"],
                                    self.config["render_x_res"], 3)
            data_r = data_r.reshape(self.config["render_y_res"],
                                    self.config["render_x_res"], 3)
            data_l = cv2.resize(data_l,
                                (self.config["x_res"], self.config["y_res"]),
                                interpolation=cv2.INTER_AREA)
            data_r = cv2.resize(data_r,
                                (self.config["x_res"], self.config["y_res"]),
                                interpolation=cv2.INTER_AREA)

            data = np.concatenate(
                (data_l, data_r),
                axis=2)  # data: uint8(0 ~ 255),  shape(y_res, x_res, 6)

        return data

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        observation = None

        if self.config["use_sensor"] == 'use_semantic':
            camera_name = "CameraSemantic"

        elif self.config["use_sensor"] in ['use_depth', 'use_logdepth']:
            camera_name = "CameraDepth"

        elif self.config["use_sensor"] == 'use_rgb':
            camera_name = "CameraRGB"

        elif self.config["use_sensor"] == 'use_2rgb':
            camera_name = ["CameraRGB_L", "CameraRGB_R"]

        # for name, image in sensor_data.items():
        #     if name == camera_name:
        #         observation = image
        if camera_name == ["CameraRGB_L", "CameraRGB_R"]:
            observation = [
                sensor_data["CameraRGB_L"], sensor_data["CameraRGB_R"]
            ]
        else:
            observation = sensor_data[camera_name]

        cur = measurements.player_measurements

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[self.planner.get_next_command(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [
                    cur.transform.orientation.x, cur.transform.orientation.y,
                    GROUND_Z
                ],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z], [
                    self.end_pos.orientation.x, self.end_pos.orientation.y,
                    GROUND_Z
                ])
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(
            np.linalg.norm([
                cur.transform.location.x - self.end_pos.location.x,
                cur.transform.location.y - self.end_pos.location.y
            ]))

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
        }

        if CARLA_OUT_PATH and self.config["log_images"]:
            for name, image in sensor_data.items():
                out_dir = os.path.join(CARLA_OUT_PATH, name)
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                out_file = os.path.join(
                    out_dir, "{}_{:>04}.jpg".format(self.episode_id,
                                                    self.num_steps))
                scipy.misc.imsave(out_file,
                                  image.data)  # image.data without preprocess.

        assert observation is not None, sensor_data
        return observation, py_measurements
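A minimal rollout sketch for this environment, assuming CarlaEnv and ENV_CONFIG above are importable and a CARLA 0.8.x server binary is reachable through SERVER_BINARY (the random policy is only a placeholder):

if __name__ == "__main__":
    env = CarlaEnv(ENV_CONFIG)            # the server is started lazily on the first reset()
    obs = env.reset()
    done, episode_return = False, 0.0
    while not done:
        action = env.action_space.sample()         # placeholder policy
        obs, reward, done, info = env.step(action)
        episode_return += reward
    print("episode return:", episode_return)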
Example No. 10
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):
        """
        Args
            city_name:
            name_to_save:
            continue_experiment:
            save_images:
            distance_for_success:
            collisions_as_failure: if this flag is set to true, episodes will terminate as failure, when the car collides.
        """

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point below which the episode
        # is considered a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)
        self._map = self._planner._city_track.get_map()

        # To keep track of the previous collision values
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode being run
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    logging.info('Timeout for Episode: %f', time_out)
                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, col_ped,
                     col_veh, col_oth, number_of_red_lights, number_of_green_lights) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), experiment_suite.metrics_parameters,
                            experiment_suite.collision_as_failure,
                            experiment_suite.traffic_light_as_failure)

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result,
                        col_ped, col_veh, col_oth, number_of_red_lights,
                        number_of_green_lights)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level directions (planner commands) needed to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _has_agent_collided(self, measurement, metrics_parameters):
        """
            This function keeps internal state (the previous collision values) and only looks at one new measurement per call.
        """
        collided_veh = 0
        collided_ped = 0
        collided_oth = 0

        if (measurement.collision_vehicles - self._previous_vehicle_collision) \
                > metrics_parameters['collision_vehicles']['threshold']/2.0:
            collided_veh = 1
        if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \
                > metrics_parameters['collision_pedestrians']['threshold']/2.0:
            collided_ped = 1
        if (measurement.collision_other - self._previous_other_collision) \
                > metrics_parameters['collision_other']['threshold']/2.0:
            collided_oth = 1

        self._previous_pedestrian_collision = measurement.collision_pedestrians
        self._previous_vehicle_collision = measurement.collision_vehicles
        self._previous_other_collision = measurement.collision_other

        return collided_ped, collided_veh, collided_oth

    def _is_traffic_light_active(self, agent, orientation):
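        # Heuristic: search for the closest lane point around the traffic light, then compare
        # the lane orientation at that point with the car's heading to decide whether this
        # light governs the lane the car is currently driving on.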

        x_agent = agent.traffic_light.transform.location.x
        y_agent = agent.traffic_light.transform.location.y

        def search_closest_lane_point(x_agent, y_agent, depth):
            step_size = 4
            if depth > 1:
                return None
            try:
                degrees = self._map.get_lane_orientation_degrees(
                    [x_agent, y_agent, 38])
                #print (degrees)
            except:
                return None

            if not self._map.is_point_on_lane([x_agent, y_agent, 38]):
                #print (" Not on lane ")
                result = search_closest_lane_point(x_agent + step_size,
                                                   y_agent, depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent,
                                                   y_agent + step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent + step_size,
                                                   y_agent + step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent + step_size,
                                                   y_agent - step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent - step_size,
                                                   y_agent + step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent - step_size,
                                                   y_agent, depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent,
                                                   y_agent - step_size,
                                                   depth + 1)
                if result is not None:
                    return result
                result = search_closest_lane_point(x_agent - step_size,
                                                   y_agent - step_size,
                                                   depth + 1)
                if result is not None:
                    return result
            else:
                #print(" ON Lane ")
                if degrees < 6:
                    return [x_agent, y_agent]
                else:
                    return None

        closest_lane_point = search_closest_lane_point(x_agent, y_agent, 0)
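        # The heading is offset by ~pi and wrapped at ~2*pi (3.1415 and 6.0 are the rough
        # constants used in the original code) before being compared with the lane orientation.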
        car_direction = math.atan2(orientation.y, orientation.x) + 3.1415
        if car_direction > 6.0:
            car_direction -= 6.0

        return math.fabs(
            car_direction - self._map.get_lane_orientation_degrees(
                [closest_lane_point[0], closest_lane_point[1], 38])) < 1

    def _test_for_traffic_lights(self, measurement):
        """
        This function tests whether the car has just crossed a traffic light.

        Args:
            measurement: all the measurements collected by CARLA 0.8.4

        Returns:
            'red' if it crossed a red light, 'green' if it crossed a green
            light, or None otherwise.
        """
        def is_on_burning_point(_map, location):
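            # A "burning point" is the spot just before entering an intersection: walk a few
            # units along the lane direction; if that point is still outside the intersection
            # but a point two units further ahead is inside it, the car is about to cross.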

            # We get the current lane orientation
            ori_x, ori_y = _map.get_lane_orientation(
                [location.x, location.y, 38])

            # We test to walk in direction of the lane
            future_location_x = location.x
            future_location_y = location.y

            for i in range(3):
                future_location_x += ori_x
                future_location_y += ori_y
            # Take a point on an intersection a couple of units further ahead
            location_on_intersection_x = future_location_x + 2 * ori_x
            location_on_intersection_y = future_location_y + 2 * ori_y

            if not _map.is_point_on_intersection([future_location_x,
                                                  future_location_y,
                                                  38]) and \
                    _map.is_point_on_intersection([location_on_intersection_x,
                                                   location_on_intersection_y,
                                                   38]):
                return True

            return False

        # Check nearest traffic light with the correct orientation state.

        player_x = measurement.player_measurements.transform.location.x
        player_y = measurement.player_measurements.transform.location.y

        # If the vehicle is not yet on an intersection, this is the place to
        # check whether it is about to run ('burn') a traffic light.

        for agent in measurement.non_player_agents:
            if agent.HasField('traffic_light'):
                if not self._map.is_point_on_intersection(
                    [player_x, player_y, 38]):
                    x_agent = agent.traffic_light.transform.location.x
                    y_agent = agent.traffic_light.transform.location.y
                    tl_vector, tl_dist = get_vec_dist(x_agent, y_agent,
                                                      player_x, player_y)
                    if self._is_traffic_light_active(
                            agent, measurement.player_measurements.transform.
                            orientation):
                        if is_on_burning_point(self._map,
                                               measurement.player_measurements.transform.location)\
                                and tl_dist < 6.0:
                            if agent.traffic_light.state != 0:  # Not green
                                return 'red'
                            else:
                                return 'green'

        return None

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, metrics_parameters,
                                collision_as_failure,
                                traffic_light_as_failure):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
            metrics_object: The metrics object to check for collisions

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        col_ped, col_veh, col_oth = 0, 0, 0
        traffic_light_state, number_red_lights, number_green_lights = None, 0, 0
        fail = False
        success = False
        not_count = 0

        while not fail and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # Agent process the data.
            control = agent.run_step(measurements, sensor_data, directions,
                                     target)
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            logging.info('Timestamp %f', current_timestamp)
            # Get the distance travelled until now

            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                         float(distance), current_x, current_y,
                         target.location.x, target.location.y)
            # Check whether the agent has collided
            col_ped, col_veh, col_oth = self._has_agent_collided(
                measurements.player_measurements, metrics_parameters)
            # Test whether the car crossed a traffic light; not_count debounces the
            # counting so a single crossing is not registered more than once in 20 frames.
            traffic_light_state = self._test_for_traffic_lights(measurements)

            if traffic_light_state == 'red' and not_count == 0:
                number_red_lights += 1
                not_count = 20

            elif traffic_light_state == 'green' and not_count == 0:
                number_green_lights += 1
                not_count = 20

            else:
                not_count -= 1
                not_count = max(0, not_count)

            if distance < self._distance_for_success:
                success = True
            elif (current_timestamp - initial_timestamp) > (time_out * 1000):
                fail = True
            elif collision_as_failure and (col_ped or col_veh or col_oth):
                fail = True
            elif traffic_light_as_failure and traffic_light_state == 'red':
                fail = True
            logging.info('Traffic Lights:')
            logging.info('red %f green %f, total %f', number_red_lights,
                         number_green_lights,
                         number_red_lights + number_green_lights)
            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance,  col_ped, col_veh, col_oth, \
                   number_red_lights, number_green_lights
        return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, \
            number_red_lights, number_green_lights
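A minimal way to exercise this benchmark is an agent whose run_step takes the four arguments used above; the sketch below is illustrative only, and the commented-out wiring (client setup, experiment suite) is an assumption in the style of the CARLA 0.8.4 PythonClient rather than code from this file:

class ForwardAgent(object):
    """Trivial agent that drives straight ahead, ignoring sensors and directions."""
    def run_step(self, measurements, sensor_data, directions, target):
        control = VehicleControl()
        control.throttle = 0.5
        return control

# Hypothetical wiring (adapt to your setup; names are assumptions):
# client = CarlaClient("localhost", 2000)
# client.connect()
# benchmark = DrivingBenchmark(city_name='Town01', name_to_save='forward_test')
# results = benchmark.benchmark_agent(experiment_suite, ForwardAgent(), client)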
Example No. 11
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """

    def __init__(
            self,
            city_name='Town01',
            name_to_save='Test',
            continue_experiment=False,
            save_images=False,
            distance_for_success=2.0
    ):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point below which the episode
        # is considered a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images
                                    )

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        self.carla_map = CarlaMap(city_name, 0.1653, 50)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the episode being run
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level directions (planner commands) needed to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x,
             current_point.location.y, 0.22),
            (current_point.orientation.x,
             current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [
                start_point.location.x, start_point.location.y, 0.22], [
                start_point.orientation.x, start_point.orientation.y, 0.22], [
                end_point.location.x, end_point.location.y, end_point.location.z], [
                end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])

    def load_empty_trajectory(self):
        original_path = "./drive_interfaces/carla/carla_client/carla/planner/" + self._city_name + ".png"
        self.trajectory_img = cv2.imread(original_path)

    def update_trajectory(self, curr_x, curr_y, tar_x, tar_y, is_first, directions):
        cur = self.carla_map.convert_to_pixel([curr_x, curr_y, .22])
        cur = [int(cur[1]), int(cur[0])]
        tar = self.carla_map.convert_to_pixel([tar_x, tar_y, .22])
        tar = [int(tar[1]), int(tar[0])]
        print("current location", cur, "final location", tar)
        if is_first:
            trj_sz = 10
        else:
            trj_sz = 3
        # Planner command codes: 0.0 REACH_GOAL, 2.0 LANE_FOLLOW and 5.0 GO_STRAIGHT are
        # drawn in red, 3.0 TURN_LEFT in green, 4.0 TURN_RIGHT in blue.
        directions_to_color = {0.0: [255, 0, 0], 2.0: [255, 0, 0], 5.0: [255, 0, 0],
                               3.0: [0, 255, 0], 4.0: [0, 0, 255]}
        color = directions_to_color[directions]
        self.trajectory_img[cur[0] - trj_sz:cur[0] + trj_sz, cur[1] - trj_sz:cur[1] + trj_sz, 0] = color[0]
        self.trajectory_img[cur[0] - trj_sz:cur[0] + trj_sz, cur[1] - trj_sz:cur[1] + trj_sz, 1] = color[1]
        self.trajectory_img[cur[0] - trj_sz:cur[0] + trj_sz, cur[1] - trj_sz:cur[1] + trj_sz, 2] = color[2]
        tar_sz = 10
        self.trajectory_img[tar[0] - tar_sz:tar[0] + tar_sz, tar[1] - tar_sz:tar[1] + tar_sz, 0] = 0
        self.trajectory_img[tar[0] - tar_sz:tar[0] + tar_sz, tar[1] - tar_sz:tar[1] + tar_sz, 1] = 0
        self.trajectory_img[tar[0] - tar_sz:tar[0] + tar_sz, tar[1] - tar_sz:tar[1] + tar_sz, 2] = 0

    def save_trajectory_image(self, episode_name):
        out_name = os.path.join(self._recording._path, '_images/episode_{:s}_trajectory.png'.format(episode_name))
        folder = os.path.dirname(out_name)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        cv2.imwrite(out_name, self.trajectory_img)


    def measurements_to_pos_yaw(self, measurements):
        pos = [measurements.player_measurements.transform.location.x,
               measurements.player_measurements.transform.location.y ]

        ori = [measurements.player_measurements.transform.orientation.x,
               measurements.player_measurements.transform.orientation.y,
               measurements.player_measurements.transform.orientation.z ]

        city_name = {"Town01": "01",
                     "Town02": "02",
                     "RFS_MAP": "10",
                     "Exp_Town": "11",
                     "Exp_Town02": "11",
                     "Exp_Town01_02Shoulder": "13"}[self._city_name]
        return {"town_id": city_name, "pos": pos, "ori": ori}

    def _run_navigation_episode(
            self,
            agent,
            client,
            time_out,
            target,
            episode_name):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        self.load_empty_trajectory()
        agent.debug_i = 0
        agent.temp_image_path = self._recording._path

        # Bookkeeping for stuck detection: consecutive low-movement frames, the previous
        # position, and how many frames have passed since the last collision.
        stuck_counter = 0
        pre_x = 0.0
        pre_y = 0.0
        last_collision_ago = 1000000000
        is_first = True

        while (current_timestamp - initial_timestamp) < (time_out * 1000) and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(measurements.player_measurements.transform, target)
            # Agent process the data.
            control = agent.run_step(measurements, sensor_data, directions, target, self.measurements_to_pos_yaw(measurements))
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            #sensor_data = self._recording.yang_annotate_images(sensor_data, directions)
            #self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ',
                         control.steer, control.throttle, control.brake)

            # game_timestamp is in milliseconds (time_out is in seconds)
            current_timestamp = measurements.game_timestamp
            # Get the distance travelled until now
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])

            self.update_trajectory(current_x, current_y, target.location.x, target.location.y, is_first, directions)
            is_first = False

            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info(
                '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                float(distance), current_x, current_y, target.location.x,
                target.location.y)
            # Check whether the target has been reached
            if distance < self._distance_for_success:
                success = True

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

            pm = measurements.player_measurements
            if pm.collision_vehicles    > 0.0 \
              or pm.collision_pedestrians > 0.0 \
              or pm.collision_other       > 0.0:
                last_collision_ago = 0
            else:
                last_collision_ago += 1

            if sldist([current_x, current_y], [pre_x, pre_y]) < 0.1:
                stuck_counter += 1
            else:
                stuck_counter = 0
            pre_x = current_x
            pre_y = current_y
            if stuck_counter > 400:
                print("breaking because of stucking too long")
                break

            if stuck_counter > 30 and last_collision_ago < 50:
                # Breaking on "stuck shortly after a collision" is intentionally disabled.
                # print("breaking because of collision and stuck")
                # break
                pass

            if math.fabs(directions) < 0.1:
                # The goal state is reached.
                success = True

        # convert the images saved by the underlying recorder to a video
        if self._recording._save_images:
            # convert the saved images to a video and store it in the right place
            # we have to save the images within the carla_machine, since only that knows the cutting plane
            # the frames are assumed to be saved as %09d.png under agent.temp_image_path
            print("converting images to a video...")
            out_name = os.path.join(self._recording._path, '_images/episode_{:s}.mp4'.format(episode_name))

            folder = os.path.dirname(out_name)
            if not os.path.isdir(folder):
                os.makedirs(folder)

            cmd = ["ffmpeg", "-y", "-i",  agent.temp_image_path+"/%09d.png", "-c:v", "libx264", out_name]
            call(" ".join(cmd), shell=True)

            cmd = ["find", agent.temp_image_path, "-name", "00*png", "-print | xargs rm"]

            call(" ".join(cmd), shell=True)

            self.save_trajectory_image(episode_name)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance
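
# `sldist` is used throughout these snippets but never defined in them. A
# minimal sketch, assuming it is the planar Euclidean distance between two
# [x, y] points (as its call sites suggest):
import math

def sldist(c1, c2):
    # Euclidean distance between two 2D points given as [x, y] sequences.
    return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)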
Example No. 12
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """

    def __init__(
            self,
            city_name='Town01',
            name_to_save='Test',
            continue_experiment=False,
            save_images=False,
            distance_for_success=2.0
    ):

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point required to consider
        # the episode a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images
                                    )

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments()[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the current pose
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index], positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index))

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(experiment, rep, pose, reward_vec,
                                                               control_vec)
                    if result > 0:
                        logging.info('+++++ Target achieved in %f seconds! +++++',
                                     final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level command (directions) needed to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x,
             current_point.location.y, 0.22),
            (current_point.orientation.x,
             current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y, end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])

    def _run_navigation_episode(
            self,
            agent,
            client,
            time_out,
            target,
            episode_name):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode

        """

        # Send an initial command.
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        success = False

        while (current_timestamp - initial_timestamp) < (time_out * 1000) and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(measurements.player_measurements.transform, target)
            # The agent processes the data.
            control = agent.run_step(measurements, sensor_data, directions, target)
            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ',
                         control.steer, control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the remaining distance to the target
            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Status:')
            logging.info(
                '[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f',
                float(distance), current_x, current_y, target.location.x,
                target.location.y)
            # Check if the target has been reached
            if distance < self._distance_for_success:
                success = True

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp) / 1000.0, distance
        return 0, measurement_vec, control_vec, time_out, distance
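
# A minimal usage sketch for the DrivingBenchmark above. The imports below
# (make_carla_client, ForwardAgent, BasicExperimentSuite) are assumptions
# based on the CARLA 0.8 Python client layout and are not part of this
# snippet; adjust them to your installation.
from carla.client import make_carla_client
from carla.agent.forward_agent import ForwardAgent
from carla.driving_benchmark.experiment_suites import BasicExperimentSuite

if __name__ == '__main__':
    with make_carla_client('localhost', 2000) as client:
        benchmark = DrivingBenchmark(city_name='Town01',
                                     name_to_save='forward_test')
        agent = ForwardAgent()
        suite = BasicExperimentSuite('Town01')
        # Runs every experiment in the suite and returns the computed metrics.
        metrics = benchmark.benchmark_agent(suite, agent, client)
        print(metrics)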
Example No. 13
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):
        """
        Args
            city_name:
            name_to_save:
            continue_experiment:
            save_images:
            distance_for_success:
            collisions_as_failure: if this flag is set to true, episodes will terminate as failure, when the car collides.
        """

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point required to consider
        # the episode a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        # To keep track of the previous collisions
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.sldi
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots
            t = len(positions)
            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the current pose
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, col_ped, col_veh, col_oth,metList) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), experiment_suite.metrics_parameters,
                            experiment_suite.collision_as_failure)

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result,
                        col_ped, col_veh, col_oth)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec,
                        metList)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level command (directions) needed to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _has_agent_collided(self, measurement, metrics_parameters):
        """
            This function keeps internal state (the previous collision intensities) and only looks at one measurement at a time.
        """
        collided_veh = 0
        collided_ped = 0
        collided_oth = 0

        if (measurement.collision_vehicles - self._previous_vehicle_collision) \
                > metrics_parameters['collision_vehicles']['threshold']/2.0:
            collided_veh = 1
        if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \
                > metrics_parameters['collision_pedestrians']['threshold']/2.0:
            collided_ped = 1
        if (measurement.collision_other - self._previous_other_collision) \
                > metrics_parameters['collision_other']['threshold']/2.0:
            collided_oth = 1

        self._previous_pedestrian_collision = measurement.collision_pedestrians
        self._previous_vehicle_collision = measurement.collision_vehicles
        self._previous_other_collision = measurement.collision_other

        return collided_ped, collided_veh, collided_oth
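
    # Worked example (illustrative numbers, not from the benchmark): with
    # metrics_parameters['collision_vehicles']['threshold'] == 400 and the
    # accumulated collision_vehicles intensity jumping from 0 to 600 since the
    # previous call, the delta (600) exceeds threshold / 2 (200), so
    # collided_veh is reported as 1 for this frame.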

    def _is_agent_stuck(self, measurements, stuck_vec, old_coll):
        # break the episode when the agent is stuck on a static object
        coll_other = measurements.collision_other
        coll_other -= old_coll
        otherlane = measurements.intersection_otherlane > 0.4
        offroad = measurements.intersection_offroad > 0.3
        logging.info(
            "offroad: {}, otherlane: {}, coll_other: {}, old_coll: {}".format(
                offroad, otherlane, coll_other, old_coll))

        # if still driving or got unstuck (v > 4km/h)
        if measurements.forward_speed * 3.6 > 4:
            cycle_signal(stuck_vec, 0)
            if coll_other:
                old_coll += coll_other
        elif offroad or otherlane or coll_other:
            cycle_signal(stuck_vec, 1)
        else:
            cycle_signal(stuck_vec, 0)

        return all(stuck_vec), stuck_vec, old_coll
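
    # `cycle_signal` is used above but not defined in this snippet. A minimal
    # in-place implementation consistent with that use (slide the fixed-length
    # window one step and append the newest flag) might look like:
    #
    #     def cycle_signal(signal, value):
    #         signal.pop(0)
    #         signal.append(value)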

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, metrics_parameters,
                                collision_as_failure):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
            metrics_parameters: the metric parameters used to check for collisions
            collision_as_failure: whether a collision terminates the episode as a failure

        """

        # Send an initial command.
        time.sleep(2)
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        ### Reset CAL agent
        agent.reset_state()

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        col_ped, col_veh, col_oth = 0, 0, 0
        fail = False
        success = False

        ### own metrics
        stuck_vec = [0] * 60  # measure for 60 frames (6 seconds)
        center_distance_vec = []
        old_collision_value = 0
        direction_vec = []

        #edited ash
        metList = []

        while not fail and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # The agent processes the data.
            curr_time = time.time()
            control, prediction = agent.run_step(measurements, sensor_data,
                                                 directions, target)
            # NOTE: the predicted steering is overridden with the autopilot's steering
            control.steer = measurements.player_measurements.autopilot_control.steer
            curr_time = time.time() - curr_time

            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the remaining distance to the target

            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Distance to target: %f', float(distance))

            # Check for collisions
            col_ped, col_veh, col_oth = self._has_agent_collided(
                measurements.player_measurements, metrics_parameters)

            ### CHANGE TO ORIGINAL CODE #####################
            is_stuck, stuck_vec, old_collision_value = self._is_agent_stuck(
                measurements.player_measurements, stuck_vec,
                old_collision_value)

            if distance < self._distance_for_success:
                success = True
            elif (current_timestamp - initial_timestamp) > (time_out * 1000):
                fail = True
            elif is_stuck:
                fail = True
            elif collision_as_failure and (col_ped or col_veh or col_oth):
                fail = True

            time_remain = (time_out * 1000 -
                           (current_timestamp - initial_timestamp)) / 1000
            logging.info('Time remaining: %i m %i s', time_remain / 60,
                         time_remain % 60)
            logging.info('')

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)
            temp = agent.getMetData()
            temp['game_timestamp'] = measurements.game_timestamp
            metList.append(temp)
            print("stp number: ", frame, " ", temp, " elapsed time: ",
                  curr_time)

            # additional per-frame logging (custom code, not part of the original benchmark)
            player = {}
            peds = []
            px = measurements.player_measurements.transform.location.x
            py = measurements.player_measurements.transform.location.y

            player['pos_x'] = px
            player['pos_y'] = py

            yaw = measurements.player_measurements.transform.rotation.yaw
            yaw = math.radians(yaw)
            sfile = open(
                '/home/self-driving/Desktop/CARLA_0.8.2/Indranil/CAL-master_copy_new/python_client/_benchmarks_results/sigData.csv',
                'a+')
            pfile = open(
                '/home/self-driving/Desktop/CARLA_0.8.2/Indranil/CAL-master_copy_new/python_client/_benchmarks_results/pData.csv',
                'a+')

            hazard = False

            sig = False
            sigState = ''

            speed_post = False
            speed_post_state = ''

            veh_inside_box = False
            veh_dist = 50.0
            '''
            prediction keys:
            center_distance
            hazard_stop
            red_light
            relative_angle
            speed_sign
            veh_distance 
            '''
            front_axel_x = 2.33

            for a in measurements.non_player_agents:

                if a.HasField('traffic_light'):

                    llx, lly = a.traffic_light.transform.location.x, a.traffic_light.transform.location.y

                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)

                    if self.inside_a2(nx, ny):
                        print("\n--traffic light found---!!")
                        print("State :", a.traffic_light.state)
                        sig = True
                        sigState = str(a.traffic_light.state)

                if a.HasField('speed_limit_sign'):
                    llx, lly = a.speed_limit_sign.transform.location.x, a.speed_limit_sign.transform.location.y
                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)
                    if self.inside_a2(nx, ny):
                        print("\n--Speed Sign found---!!")
                        print("Limit :", a.speed_limit_sign.speed_limit * 3.6)
                        speed_post = True
                        speed_post_state = str(
                            math.floor(a.speed_limit_sign.speed_limit * 3.6))

                if a.HasField('pedestrian'):
                    llx, lly = a.pedestrian.transform.location.x, a.pedestrian.transform.location.y
                    ped = {}
                    ped['pos_x'] = llx
                    ped['pos_y'] = lly
                    peds.append(ped)
                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)
                    if self.inside_a1(nx, ny):
                        print("\n--Pedestrian Hazard---!!")
                        print("Pedestrian in front ")
                        hazard = True

                if a.HasField('vehicle'):
                    llx, lly = a.vehicle.transform.location.x, a.vehicle.transform.location.y
                    nx, ny = self.getNewCord((px, py), (llx, lly), yaw)
                    hlen = a.vehicle.bounding_box.extent.x
                    if self.inside_a3(nx, ny):
                        distan = self.getdis(0 + front_axel_x, 0, nx,
                                             ny - hlen)
                        veh_inside_box = True
                        veh_dist = min(distan, veh_dist)
                        print("Vehicle in front distance,", distan,
                              " Predicted ", prediction['veh_distance'])

            player['peds'] = peds

            # write the hazard-stop ground truth and prediction for the current frame
            if hazard is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Hazard," + str(hazard) +
                    "," + str(prediction['hazard_stop'][0]) + "," +
                    str(prediction['hazard_stop'][1]) + "\n")
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Hazard," + str(False) +
                    "," + str(prediction['hazard_stop'][0]) + "," +
                    str(prediction['hazard_stop'][1]) + "\n")
            if sig is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + "," + "Traffic," +
                    sigState + "," + str(prediction['red_light'][0]) + "," +
                    str(prediction['red_light'][1]) + "\n")
                print("Traffic light.. Actual:", a.traffic_light.state,
                      " Predicted", prediction['red_light'][0])
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + "," + "Traffic," + '0' +
                    "," + str(prediction['red_light'][0]) + "," +
                    str(prediction['red_light'][1]) + "\n")
            if speed_post is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",SpeedSign," +
                    speed_post_state + "," + str(prediction['speed_sign'][0]) +
                    "," + str(prediction['speed_sign'][1]) + "\n")
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",SpeedSign," + '-1' + "," +
                    str(prediction['speed_sign'][0]) + "," +
                    str(prediction['speed_sign'][1]) + "\n")

            if veh_inside_box is True:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Vehicle," +
                    str(veh_dist) + "," + str(prediction['veh_distance']) +
                    "\n")
            else:
                sfile.write(
                    str('{:0>6d}'.format(frame)) + ",Vehicle," + str(50.0) +
                    "," + str(prediction['veh_distance']) + "\n")

            sfile.write(
                str('{:0>6d}'.format(frame)) + ",CenterDist," +
                str(temp['centerDist']) + "," +
                str(prediction['center_distance']) + "\n")

            sfile.write(
                str('{:0>6d}'.format(frame)) + ",Angel," + str(yaw) + "," +
                str(prediction['relative_angle']) + "\n")

            jout = json.dumps(player)
            pfile.write(jout + "\n")

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp
            ) / 1000.0, distance, col_ped, col_veh, col_oth, metList
        return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, metList

    def getNewCord(self, origin, point, psi):
        # Transform a world-frame point into the ego (vehicle) frame: solving
        # R(psi) * x = (point - origin) rotates the offset by -psi, i.e.
        # x = R(psi)^T (point - origin).
        a = np.array([[math.cos(psi), -math.sin(psi)],
                      [math.sin(psi), math.cos(psi)]])
        b = np.array([(point[0] - origin[0]), (point[1] - origin[1])])
        x = np.linalg.solve(a, b)
        return x

    def getdis(self, x1, y1, x2, y2):
        return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)

    def inside_a2(self, x, y):
        # Ego-frame box used to detect traffic lights and speed signs ahead.
        front_axel_x = 0
        if (x >= (7.4 + front_axel_x) and x <=
            (14.0 + front_axel_x)) and (y >= 0.8 and y <= 5.8):
            return True
        return False

    def inside_a1(self, x, y):
        # Ego-frame box used to detect pedestrian hazards directly in front.
        front_axel_x = 2.33
        if (x >= (0.0 + front_axel_x) and x <=
            (8.2 + front_axel_x)) and (y >= -2.0 and y <= 2.0):
            return True
        return False

    def inside_a3(self, x, y):
        # Ego-frame box used to detect a lead vehicle up to ~50 m ahead.
        front_axel_x = 2.33
        if (x >= (0 + front_axel_x) and x <=
            (50 + front_axel_x)) and (y >= -1.6 and y <= 1.6):
            return True
        return False
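
    # Worked example of the ego-frame checks above (illustrative numbers):
    # with the player at (px, py) = (0, 0) and yaw = 0, a vehicle at world
    # position (10, 0) maps through getNewCord to (nx, ny) = (10, 0), which
    # falls inside inside_a3's box (x in [2.33, 52.33], y in [-1.6, 1.6]),
    # so it is treated as a lead vehicle and its distance ahead of the front
    # axle is reported.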
Example No. 14
0
class StraightDriveEnv(CarlaEnv):
    def __init__(self,
                 client,
                 frame_skip=1,
                 cam_width=800,
                 cam_height=600,
                 town_string='Town01'):
        super(StraightDriveEnv, self).__init__()
        self.frame_skip = frame_skip
        self.client = client
        self._planner = Planner(town_string)

        camera0 = Camera('CameraRGB')
        camera0.set(CameraFOV=100)
        camera0.set_image_size(cam_height, cam_width)
        camera0.set_position(200, 0, 140)
        camera0.set_rotation(-15.0, 0, 0)

        self.start_goal_pairs = [[36, 40], [39, 35], [110, 114], [7, 3],
                                 [0, 4], [68, 50], [61, 59], [47, 64],
                                 [147, 90], [33, 87], [26, 19], [80, 76],
                                 [45, 49], [55, 44], [29, 107], [95, 104],
                                 [84, 34], [53, 67], [22, 17], [91, 148],
                                 [20, 107], [78, 70], [95, 102], [68, 44],
                                 [45, 69]]

        vehicles = 0
        pedestrians = 0
        weather = 1

        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=vehicles,
            NumberOfPedestrians=pedestrians,
            WeatherId=weather,
        )

        settings.randomize_seeds()
        settings.add_sensor(camera0)

        self.scene = self.client.load_settings(settings)

        img_shape = (cam_width, cam_height, 3)
        self.observation_space = spaces.Tuple(
            (spaces.Box(-np.inf, np.inf, (3, )), spaces.Box(0, 255,
                                                            img_shape)))
        self.action_space = spaces.Box(-np.inf, np.inf, shape=(3, ))

        self.prev_state = np.array([0., 0., 0.])
        self.prev_collisions = np.array([0., 0., 0.])
        self.prev_intersections = np.array([0., 0.])

    def step(self, action):
        steer = np.clip(action[0], -1, 1)
        throttle = np.clip(action[1], 0, 1)
        brake = np.clip(action[2], 0, 1)
        for i in range(self.frame_skip):
            self.t += 1
            self.client.send_control(steer=steer,
                                     throttle=throttle,
                                     brake=brake,
                                     hand_brake=False,
                                     reverse=False)

        measurements, sensor_data = self.client.read_data()
        sensor_data = self._process_image(sensor_data['CameraRGB'])
        current_time = measurements.game_timestamp

        state, collisions, intersections, onehot = self._process_measurements(
            measurements)
        reward, dist_goal = self._calculate_reward(state, collisions,
                                                   intersections)
        done = self._calculate_done(collisions, state, current_time)

        self.prev_state = np.array(state)
        self.prev_collisions = np.array(collisions)
        self.prev_intersections = np.array(intersections)

        measurement_obs = self._generate_obs(state, collisions, onehot,
                                             dist_goal)

        return (measurement_obs, sensor_data), reward, done, {}

    def reset(self):
        self._generate_start_goal_pair()

        print('Starting new episode...')
        # Blocking function until episode is ready
        self.client.start_episode(self.start_idx)

        measurements, sensor_data = self.client.read_data()
        sensor_data = self._process_image(sensor_data['CameraRGB'])

        state, collisions, intersections, onehot = self._process_measurements(
            measurements)

        self.prev_state = np.array(state)
        self.prev_collisions = np.array(collisions)
        self.prev_intersections = np.array(intersections)

        self.start_time = measurements.game_timestamp
        self.t = 0

        pos = np.array(state[0:2])
        dist_goal = np.linalg.norm(pos - self.goal)

        measurement_obs = self._generate_obs(state, collisions, onehot,
                                             dist_goal)

        return (measurement_obs, sensor_data)

    def _calculate_done(self, collisions, state, current_time):
        pos = np.array(state[0:2])
        dist_goal = np.linalg.norm(pos - self.goal)

        return self._is_timed_out(current_time) or self._is_goal(dist_goal)

        # Not described in the paper, but should be there for safe driving.
        # This second return is unreachable, so it is kept only as a note:
        # return self._is_timed_out() and self._collision_on_step(dist_goal)

    def _calculate_planner_onehot(self, measurements):
        # return np.array([0, 0, 0, 0, 1]) # TODO need to debug
        print(type(self.end_point.location), type(self.end_point.orientation))
        val = self._planner.get_next_command(measurements.location,
                                             measurements.orientation,
                                             self.end_point.location,
                                             self.end_point.orientation)

        if val == 0.0:
            return np.array([1, 0, 0, 0, 0])

        onehot = np.zeros(5)
        val = int(val) - 1
        onehot[val] = 1

        return onehot
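
    # Worked example of the mapping above, assuming the usual CARLA 0.8
    # planner command codes (0.0 REACH_GOAL, 2.0 LANE_FOLLOW, 3.0 TURN_LEFT,
    # 4.0 TURN_RIGHT, 5.0 GO_STRAIGHT): a command of 0.0 yields
    # [1, 0, 0, 0, 0], while LANE_FOLLOW (2.0) sets index int(2.0) - 1 = 1,
    # i.e. [0, 1, 0, 0, 0].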

    def _calculate_reward(self, state, collisions, intersections):
        pos = np.array(state[0:2])
        dist_goal = np.linalg.norm(pos - self.goal)
        dist_goal_prev = np.linalg.norm(self.prev_state[0:2] - self.goal)

        speed = state[2]

        # TODO: Check this?
        r = 1000 * (dist_goal_prev - dist_goal) / 10 + 0.05 * (speed - self.prev_state[2]) \
         - 2 * (sum(intersections) - sum(self.prev_intersections))

        return r, dist_goal
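
    # Worked example (illustrative numbers): if the agent moved 0.5 m closer
    # to the goal, sped up by 1.0 m/s and crossed no new lane/offroad
    # boundary, the reward is 1000 * 0.5 / 10 + 0.05 * 1.0 - 2 * 0 = 50.05.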

    def _calculate_timeout(self):
        # Distance is in cm: convert to km, assume an average speed of
        # 10 km/h, convert the resulting hours to seconds and add a 10 s buffer.
        self.timeout_t = (
            (self.timeout_dist / 100000.0) / 10.0) * 3600.0 + 10.0

    def _collision_on_step(self, collisions):
        return sum(collisions) > 0

    def _generate_obs(self, state, collisions, onehot, dist_goal):
        speed = state[2]
        collisions = np.sum(collisions)

        return np.concatenate((np.array([speed, dist_goal,
                                         collisions]), np.array(onehot)))

    def _generate_start_goal_pair(self):
        # Choose one start/goal pair at random (np.random.randint excludes
        # the upper bound, so pass the full length to include the last pair).
        self.position_index = np.random.randint(
            0, len(self.start_goal_pairs))
        self.start_idx = self.start_goal_pairs[self.position_index][0]
        self.goal_idx = self.start_goal_pairs[self.position_index][1]

        start_point = self.scene.player_start_spots[self.start_idx]
        end_point = self.scene.player_start_spots[self.goal_idx]
        self.end_point = end_point

        self.goal = [end_point.location.x / 100,
                     end_point.location.y / 100]  # cm -> m

        self.timeout_dist = self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 22],
            [start_point.orientation.x, start_point.orientation.y, 22],
            [end_point.location.x, end_point.location.y, 22],
            [end_point.orientation.x, end_point.orientation.y, 22])

        self._calculate_timeout()

    def _is_goal(self, distance):
        return distance < 2.0

    def _is_timed_out(self, current_time):
        return (current_time - self.start_time) > (self.timeout_t * 1000)

    def _process_image(self, carla_raw_img):
        return resize(to_rgb_array(carla_raw_img), (84, 84))

    def _process_measurements(self, measurements):
        player_measurements = measurements.player_measurements

        pos_x = player_measurements.transform.location.x / 100  # cm -> m
        pos_y = player_measurements.transform.location.y / 100
        speed = player_measurements.forward_speed

        col_cars = player_measurements.collision_vehicles
        col_ped = player_measurements.collision_pedestrians
        col_other = player_measurements.collision_other

        other_lane = player_measurements.intersection_otherlane
        offroad = player_measurements.intersection_offroad

        onehot = self._calculate_planner_onehot(player_measurements.transform)

        return (np.array([pos_x, pos_y, speed]),
                np.array([col_cars, col_ped, col_other]),
                np.array([other_lane, offroad]),
                onehot)
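
# A minimal rollout sketch for the StraightDriveEnv above. It assumes a CARLA
# 0.8 server is already running and that make_carla_client comes from the
# standard CARLA Python client; none of this is part of the snippet itself.
from carla.client import make_carla_client

def random_rollout(num_steps=100):
    with make_carla_client('localhost', 2000) as client:
        env = StraightDriveEnv(client, frame_skip=1)
        obs = env.reset()
        for _ in range(num_steps):
            # Sample a random (steer, throttle, brake) action; step() clips it.
            action = env.action_space.sample()
            obs, reward, done, info = env.step(action)
            if done:
                obs = env.reset()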
Example No. 15
0
class DrivingBenchmark(object):
    """
    The Benchmark class, controls the execution of the benchmark interfacing
    an Agent class with a set Suite.


    The benchmark class must be inherited with a class that defines the
    all the experiments to be run by the agent
    """
    def __init__(self,
                 city_name='Town01',
                 name_to_save='Test',
                 continue_experiment=False,
                 save_images=False,
                 distance_for_success=2.0):
        """
        Args
            city_name:
            name_to_save:
            continue_experiment:
            save_images:
            distance_for_success:
            collisions_as_failure: if this flag is set to true, episodes will terminate as failure, when the car collides.
        """

        self.__metaclass__ = abc.ABCMeta

        self._city_name = city_name
        self._base_name = name_to_save
        # The minimum distance to the goal point required to consider
        # the episode a success
        self._distance_for_success = distance_for_success
        # The object used to record the benchmark and to be able to continue it afterwards
        self._recording = Recording(name_to_save=name_to_save,
                                    continue_experiment=continue_experiment,
                                    save_images=save_images)

        # We have a default planner instantiated that produces high level commands
        self._planner = Planner(city_name)

        # To keep track of the previous collisions
        self._previous_pedestrian_collision = 0
        self._previous_vehicle_collision = 0
        self._previous_other_collision = 0

    def benchmark_agent(self, experiment_suite, agent, client):
        """
        Function to benchmark the agent.
        It first check the log file for this benchmark.
        if it exist it continues from the experiment where it stopped.


        Args:
            experiment_suite
            agent: an agent object with the run step class implemented.
            client:


        Return:
            A dictionary with all the metrics computed from the
            agent running the set of experiments.
        """

        # Instantiate a metric object that will be used to compute the metrics for
        # the benchmark afterwards.
        metrics_object = Metrics(experiment_suite.metrics_parameters,
                                 experiment_suite.dynamic_tasks)

        # Function return the current pose and task for this benchmark.
        start_pose, start_experiment = self._recording.get_pose_and_experiment(
            experiment_suite.get_number_of_poses_task())

        logging.info('START')

        for experiment in experiment_suite.get_experiments(
        )[int(start_experiment):]:

            positions = client.load_settings(
                experiment.conditions).player_start_spots

            self._recording.log_start(experiment.task)

            for pose in experiment.poses[start_pose:]:
                for rep in range(experiment.repetitions):

                    start_index = pose[0]
                    end_index = pose[1]

                    client.start_episode(start_index)
                    # Print information on the current pose
                    logging.info('======== !!!! ==========')
                    logging.info(' Start Position %d End Position %d ',
                                 start_index, end_index)

                    self._recording.log_poses(start_index, end_index,
                                              experiment.Conditions.WeatherId)

                    # Calculate the initial distance for this episode
                    initial_distance = \
                        sldist(
                            [positions[start_index].location.x, positions[start_index].location.y],
                            [positions[end_index].location.x, positions[end_index].location.y])

                    time_out = experiment_suite.calculate_time_out(
                        self._get_shortest_path(positions[start_index],
                                                positions[end_index]))

                    # running the agent
                    (result, reward_vec, control_vec, final_time, remaining_distance, col_ped, col_veh, col_oth) = \
                        self._run_navigation_episode(
                            agent, client, time_out, positions[end_index],
                            str(experiment.Conditions.WeatherId) + '_'
                            + str(experiment.task) + '_' + str(start_index)
                            + '.' + str(end_index), experiment_suite.metrics_parameters,
                            experiment_suite.collision_as_failure)

                    # Write the general status of the episode that just ran
                    self._recording.write_summary_results(
                        experiment, pose, rep, initial_distance,
                        remaining_distance, final_time, time_out, result,
                        col_ped, col_veh, col_oth)

                    # Write the details of this episode.
                    self._recording.write_measurements_results(
                        experiment, rep, pose, reward_vec, control_vec)
                    if result > 0:
                        logging.info(
                            '+++++ Target achieved in %f seconds! +++++',
                            final_time)
                    else:
                        logging.info('----- Timeout! -----')

            start_pose = 0

        self._recording.log_end()

        return metrics_object.compute(self._recording.path)

    def get_path(self):
        """
        Returns the path where the log was saved.
        """
        return self._recording.path

    def _get_directions(self, current_point, end_point):
        """
        Returns the high-level command (directions) needed to reach a certain goal
        """

        directions = self._planner.get_next_command(
            (current_point.location.x, current_point.location.y, 0.22),
            (current_point.orientation.x, current_point.orientation.y,
             current_point.orientation.z),
            (end_point.location.x, end_point.location.y, 0.22),
            (end_point.orientation.x, end_point.orientation.y,
             end_point.orientation.z))
        return directions

    def _get_shortest_path(self, start_point, end_point):
        """
        Calculates the shortest path between two points considering the road network
        """

        return self._planner.get_shortest_path_distance(
            [start_point.location.x, start_point.location.y, 0.22],
            [start_point.orientation.x, start_point.orientation.y, 0.22],
            [end_point.location.x, end_point.location.y, end_point.location.z],
            [
                end_point.orientation.x, end_point.orientation.y,
                end_point.orientation.z
            ])

    def _has_agent_collided(self, measurement, metrics_parameters):
        """
            This function keeps internal state (the previous collision intensities) and only looks at one measurement at a time.
        """
        collided_veh = 0
        collided_ped = 0
        collided_oth = 0

        if (measurement.collision_vehicles - self._previous_vehicle_collision) \
                > metrics_parameters['collision_vehicles']['threshold']/2.0:
            collided_veh = 1
        if (measurement.collision_pedestrians - self._previous_pedestrian_collision) \
                > metrics_parameters['collision_pedestrians']['threshold']/2.0:
            collided_ped = 1
        if (measurement.collision_other - self._previous_other_collision) \
                > metrics_parameters['collision_other']['threshold']/2.0:
            collided_oth = 1

        self._previous_pedestrian_collision = measurement.collision_pedestrians
        self._previous_vehicle_collision = measurement.collision_vehicles
        self._previous_other_collision = measurement.collision_other

        return collided_ped, collided_veh, collided_oth

    def _is_agent_stuck(self, measurements, stuck_vec, old_coll):
        # break the episode when the agent is stuck on a static object
        coll_other = measurements.collision_other
        coll_other -= old_coll
        otherlane = measurements.intersection_otherlane > 0.4
        offroad = measurements.intersection_offroad > 0.3
        logging.info(
            "offroad: {}, otherlane: {}, coll_other: {}, old_coll: {}".format(
                offroad, otherlane, coll_other, old_coll))

        # if still driving or got unstuck (v > 4km/h)
        if measurements.forward_speed * 3.6 > 4:
            cycle_signal(stuck_vec, 0)
            if coll_other:
                old_coll += coll_other
        elif offroad or otherlane or coll_other:
            cycle_signal(stuck_vec, 1)
        else:
            cycle_signal(stuck_vec, 0)

        return all(stuck_vec), stuck_vec, old_coll

    def _run_navigation_episode(self, agent, client, time_out, target,
                                episode_name, metrics_parameters,
                                collision_as_failure):
        """
         Run one episode of the benchmark (Pose) for a certain agent.


        Args:
            agent: the agent object
            client: an object of the carla client to communicate
            with the CARLA simulator
            time_out: the time limit to complete this episode
            target: the target to reach
            episode_name: The name for saving images of this episode
            metrics_parameters: the metric parameters used to check for collisions
            collision_as_failure: whether a collision terminates the episode as a failure

        """

        # Send an initial command.
        time.sleep(2)
        measurements, sensor_data = client.read_data()
        client.send_control(VehicleControl())

        ### Reset CAL agent
        agent.reset_state()

        initial_timestamp = measurements.game_timestamp
        current_timestamp = initial_timestamp

        # The vector containing all measurements produced on this episode
        measurement_vec = []
        # The vector containing all controls produced on this episode
        control_vec = []
        frame = 0
        distance = 10000
        col_ped, col_veh, col_oth = 0, 0, 0
        fail = False
        success = False

        ### own metrics
        stuck_vec = [0] * 60  # measure for 60 frames (6 seconds)
        center_distance_vec = []
        old_collision_value = 0
        direction_vec = []

        while not fail and not success:

            # Read data from server with the client
            measurements, sensor_data = client.read_data()
            # The directions to reach the goal are calculated.
            directions = self._get_directions(
                measurements.player_measurements.transform, target)
            # The agent processes the data.
            control = agent.run_step(measurements, sensor_data, directions,
                                     target)

            # Send the control commands to the vehicle
            client.send_control(control)

            # save images if the flag is activated
            self._recording.save_images(sensor_data, episode_name, frame)

            current_x = measurements.player_measurements.transform.location.x
            current_y = measurements.player_measurements.transform.location.y

            logging.info("Controller is Inputting:")
            logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer,
                         control.throttle, control.brake)

            current_timestamp = measurements.game_timestamp
            # Get the remaining distance to the target

            distance = sldist([current_x, current_y],
                              [target.location.x, target.location.y])
            # Write status of the run on verbose mode
            logging.info('Distance to target: %f', float(distance))

            # Check for collisions
            col_ped, col_veh, col_oth = self._has_agent_collided(
                measurements.player_measurements, metrics_parameters)

            ### CHANGE TO ORIGINAL CODE #####################
            is_stuck, stuck_vec, old_collision_value = self._is_agent_stuck(
                measurements.player_measurements, stuck_vec,
                old_collision_value)

            if distance < self._distance_for_success:
                success = True
            elif (current_timestamp - initial_timestamp) > (time_out * 1000):
                fail = True
            elif is_stuck:
                fail = True
            elif collision_as_failure and (col_ped or col_veh or col_oth):
                fail = True

            time_remain = (time_out * 1000 -
                           (current_timestamp - initial_timestamp)) / 1000
            logging.info('Time remaining: %i m %i s', time_remain / 60,
                         time_remain % 60)
            logging.info('')

            # Increment the vectors and append the measurements and controls.
            frame += 1
            measurement_vec.append(measurements.player_measurements)
            control_vec.append(control)

        if success:
            return 1, measurement_vec, control_vec, float(
                current_timestamp - initial_timestamp
            ) / 1000.0, distance, col_ped, col_veh, col_oth
        return 0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth
Example No. 16
0
class CarlaEnv(gym.Env):

    def __init__(self, config=ENV_CONFIG):
        self.config = config
        self.city = self.config["server_map"].split("/")[-1]
        if self.config["enable_planner"]:
            self.planner = Planner(self.city)

        self.action_space = Discrete(len(DISCRETE_ACTIONS))

        # RGB Camera
        self.frame_shape = (config["y_res"], config["x_res"])
        image_space = Box(
            0, 1, shape=(
                config["y_res"], config["x_res"],
                FRAME_DEPTH * config["framestack"]), dtype=np.float32)
        self.observation_space = image_space

        # TODO(ekl) this isn't really a proper gym spec
        self._spec = lambda: None
        self._spec.id = "Carla-v0"

        self.server_port = None
        self.server_process = None
        self.client = None
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = None
        self.measurements_file = None
        self.weather = None
        self.scenario = None
        self.start_pos = None
        self.end_pos = None
        self.start_coord = None
        self.end_coord = None
        self.last_obs = None
        self.last_full_obs = None
        self.framestack = [None] * config["framestack"]
        self.framestack_index = 0
        self.running_restarts = 0
        self.on_step = None
        self.on_next = None
        self.photo_index = 1
        self.episode_index = 0

    def init_server(self):
        print("Initializing new Carla server...")
        # Create a new server process and start the client.
        self.server_port = random.randint(10000, 60000)
        self.server_process = subprocess.Popen(
            [self.config["server_binary"], self.config["server_map"],
             "-windowed",
             "-ResX={}".format(self.config["render_x_res"]),
             "-ResY={}".format(self.config["render_y_res"]),
             "-fps={}".format(self.config["fps"]),
             "-carla-server",
             "-carla-world-port={}".format(self.server_port)],
            preexec_fn=os.setsid, stdout=open(os.devnull, "w"))
        live_carla_processes.add(os.getpgid(self.server_process.pid))

        for i in range(RETRIES_ON_ERROR):
            try:
                self.client = CarlaClient("localhost", self.server_port)
                return self.client.connect()
            except Exception as e:
                print("Error connecting: {}, attempt {}".format(e, i))
                time.sleep(2)

    def clear_server_state(self):
        print("Clearing Carla server state")
        try:
            if self.client:
                self.client.disconnect()
                self.client = None
        except Exception as e:
            print("Error disconnecting client: {}".format(e))
        if self.server_process:
            pgid = os.getpgid(self.server_process.pid)
            os.killpg(pgid, signal.SIGKILL)
            live_carla_processes.remove(pgid)
            self.server_port = None
            self.server_process = None

    def __del__(self):
        self.clear_server_state()

    def reset(self):
        if self.on_next is not None:
            self.on_next()

        error = None
        self.running_restarts += 1
        for _ in range(RETRIES_ON_ERROR):
            try:
                # Force a full reset of Carla after some number of restarts
                if self.running_restarts > self.config["server_restart_interval"]:
                    print("Shutting down carla server...")
                    self.running_restarts = 0
                    self.clear_server_state()

                # If server down, initialize
                if not self.server_process:
                    self.init_server()

                # Run episode reset
                return self._reset()
            except Exception as e:
                print("Error during reset: {}".format(traceback.format_exc()))
                self.clear_server_state()
                error = e
                time.sleep(5)
        raise error

    def _build_camera(self, name, post):
        camera = Camera(name, PostProcessing=post)
        camera.set_image_size(
            self.config["render_x_res"], self.config["render_y_res"])
        camera.set_position(x=2.4, y=0, z=0.8)
        camera.set_rotation(pitch=-40, roll=0, yaw=0)
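        # Mounted slightly ahead of and above the vehicle, pitched down toward the road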
        # camera.FOV = 100
        return camera

    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.episode_index += 1
        self.photo_index = 1
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
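        # SynchronousMode makes the server wait for send_control each frame, keeping
        # actions and observations in lockstep.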
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather,
            QualityLevel=self.config["quality"])
        settings.randomize_seeds()

        # Add the cameras
        if self.config["save_images_rgb"]:
            settings.add_sensor(self._build_camera(name="CameraRGB", post="SceneFinal"))
        settings.add_sensor(self._build_camera(name="CameraDepth", post="Depth"))
        settings.add_sensor(self._build_camera(name="CameraClass", post="SemanticSegmentation"))

        # Setup start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100]
        print(
            "Start pos {} ({}), end {} ({})".format(
                self.scenario["start_pos_id"], self.start_coord,
                self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        py_measurements["control"] = {
            "throttle_brake": 0,
            "steer": 0,
            "throttle": 0,
            "brake": 0,
            "reverse": False,
            "hand_brake": False,
        }
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)

    def encode_obs(self, obs, py_measurements):
        new_obs = obs
        num_frames = int(self.config["framestack"])
        if num_frames > 1:
            # Start with the newest frame, then fill the remaining slots with
            # progressively older frames from the ring buffer
            frame_array = [obs] * num_frames
            for frame_index in range(1, num_frames):
                index = (self.framestack_index - frame_index) % num_frames
                if self.framestack[index] is not None:
                    frame_array[frame_index] = self.framestack[index]

            # Concatenate into a single np array
            new_obs = np.concatenate(frame_array, axis=2)

        # Store the newest frame and advance the ring-buffer index
        self.framestack[self.framestack_index] = obs
        self.framestack_index = (self.framestack_index + 1) % num_frames
        self.last_obs = obs

        # Return
        return new_obs

    def step(self, action):
        try:
            obs, reward, done, info = self._step(action)
            # Keep the last good observation so it can be returned if a later step fails.
            self.last_full_obs = obs
            return obs, reward, done, info
        except Exception:
            print(
                "Error during step, terminating episode early",
                traceback.format_exc())
            self.clear_server_state()
            return (self.last_full_obs, 0.0, True, {})

    def _step(self, action):
        action = DISCRETE_ACTIONS[int(action)]
        assert len(action) == 2, "Invalid action {}".format(action)
        if self.config["squash_action_logits"]:
            forward = 2 * float(sigmoid(action[0]) - 0.5)
            throttle = float(np.clip(forward, 0, 1))
            brake = float(np.abs(np.clip(forward, -1, 0)))
            steer = 2 * float(sigmoid(action[1]) - 0.5)
        else:
            throttle = float(np.clip(action[0], 0, 1))
            brake = float(np.abs(np.clip(action[0], -1, 0)))
            steer = float(np.clip(action[1], -1, 1))
        reverse = False
        hand_brake = False

        if self.config["verbose"]:
            print(
                "steer", steer, "throttle", throttle, "brake", brake,
                "reverse", reverse)

        self.client.send_control(
            steer=steer, throttle=throttle, brake=brake, hand_brake=hand_brake,
            reverse=reverse)

        # Process observations
        image, py_measurements = self._read_observation()
        if self.config["verbose"]:
            print("Next command", py_measurements["next_command"])
        if isinstance(action, np.ndarray):
            py_measurements["action"] = [float(a) for a in action]
        else:
            py_measurements["action"] = action
        py_measurements["control"] = {
            "throttle_brake": action[0],
            "steer": steer,
            "throttle": throttle,
            "brake": brake,
            "reverse": reverse,
            "hand_brake": hand_brake,
        }

        # Done?
        finished = self.num_steps > self.scenario["max_steps"]
        # (Reaching the goal could also be treated as finished via
        # py_measurements["next_command"] == "REACH_GOAL".)
        failed = TERM.compute_termination(self, py_measurements, self.prev_measurement)
        done = finished or failed
        py_measurements["finished"] = finished
        py_measurements["failed"] = failed
        py_measurements["done"] = done

        # Determine Reward
        reward = compute_reward(self, self.prev_measurement, py_measurements)
        self.total_reward += reward
        py_measurements["reward"] = reward
        py_measurements["total_reward"] = self.total_reward
        self.prev_measurement = py_measurements

        # Callback
        if self.on_step is not None:
            self.on_step(py_measurements)

        # Write out measurements to file
        if self.config["carla_out_path"]:
            # Ensure the measurements dir exists
            measurements_dir = os.path.join(self.config["carla_out_path"], self.config["measurements_subdir"])
            os.makedirs(measurements_dir, exist_ok=True)

            if not self.measurements_file:
                self.measurements_file = open(
                    os.path.join(
                        measurements_dir,
                        "m_{}.json".format(self.episode_id)),
                    "w")
            self.measurements_file.write(json.dumps(py_measurements))
            self.measurements_file.write("\n")
            if done:
                self.measurements_file.close()
                self.measurements_file = None
                if self.config["convert_images_to_video"]:
                    self.images_to_video(camera_name="RGB")
                    self.images_to_video(camera_name="Depth")
                    self.images_to_video(camera_name="Class")

        self.num_steps += 1
        image = self.preprocess_image(image)
        return (
            self.encode_obs(image, py_measurements), reward, done,
            py_measurements)

    def preprocess_image(self, image):
        # No-op hook: the observation is already fused and resized in _fuse_observations.
        return image

    def _fuse_observations(self, depth, clazz, speed):
        base_shape = (self.config["render_y_res"], self.config["render_x_res"])
        new_shape = (self.config["y_res"], self.config["x_res"])

        # Reduce class
        clazz = reduce_classifications(clazz)

        # Resize if the render resolution differs from the observation resolution
        if base_shape != new_shape:
            depth = cv2.resize(depth, (new_shape[1], new_shape[0]))
            clazz = resize_classifications(clazz, new_shape)

        # Fuse with depth!
        obs = fuse_with_depth(clazz, depth, extra_layers=1)

        # Write the scalar forward speed into the extra channel so the agent can observe its speed
        obs[:, :, KEEP_CLASSIFICATIONS] = speed
        return obs, clazz

    def _read_observation(self):
        # Read the data produced by the server this frame.
        measurements, sensor_data = self.client.read_data()
        cur = measurements.player_measurements

        # Print some of the measurements.
        if self.config["verbose"]:
            print_measurements(measurements)

        # Fuse the observation data to create a single observation
        observation, clazzes = self._fuse_observations(
            sensor_data['CameraDepth'].data,
            sensor_data['CameraClass'].data,
            cur.forward_speed)

        if self.config["enable_planner"]:
            next_command = COMMANDS_ENUM[
                self.planner.get_next_command(
                    [cur.transform.location.x, cur.transform.location.y,
                     GROUND_Z],
                    [cur.transform.orientation.x, cur.transform.orientation.y,
                     GROUND_Z],
                    [self.end_pos.location.x, self.end_pos.location.y,
                     GROUND_Z],
                    [self.end_pos.orientation.x, self.end_pos.orientation.y,
                     GROUND_Z])
            ]
        else:
            next_command = "LANE_FOLLOW"

        if next_command == "REACH_GOAL":
            distance_to_goal = 0.0  # avoids crash in planner
        elif self.config["enable_planner"]:
            distance_to_goal = self.planner.get_shortest_path_distance(
                [cur.transform.location.x, cur.transform.location.y, GROUND_Z],
                [cur.transform.orientation.x, cur.transform.orientation.y,
                 GROUND_Z],
                [self.end_pos.location.x, self.end_pos.location.y, GROUND_Z],
                [self.end_pos.orientation.x, self.end_pos.orientation.y,
                 GROUND_Z]) / 100
        else:
            distance_to_goal = -1

        distance_to_goal_euclidean = float(np.linalg.norm(
            [cur.transform.location.x - self.end_pos.location.x,
             cur.transform.location.y - self.end_pos.location.y]) / 100)
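        # Distances are scaled down by 100, consistent with the // 100 scaling applied
        # to the start/end coordinates in _reset.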

        py_measurements = {
            "episode_id": self.episode_id,
            "step": self.num_steps,
            "x": cur.transform.location.x,
            "y": cur.transform.location.y,
            "x_orient": cur.transform.orientation.x,
            "y_orient": cur.transform.orientation.y,
            "forward_speed": cur.forward_speed,
            "distance_to_goal": distance_to_goal,
            "distance_to_goal_euclidean": distance_to_goal_euclidean,
            "collision_vehicles": cur.collision_vehicles,
            "collision_pedestrians": cur.collision_pedestrians,
            "collision_other": cur.collision_other,
            "intersection_offroad": cur.intersection_offroad,
            "intersection_otherlane": cur.intersection_otherlane,
            "weather": self.weather,
            "map": self.config["server_map"],
            "start_coord": self.start_coord,
            "end_coord": self.end_coord,
            "current_scenario": self.scenario,
            "x_res": self.config["x_res"],
            "y_res": self.config["y_res"],
            "num_vehicles": self.scenario["num_vehicles"],
            "num_pedestrians": self.scenario["num_pedestrians"],
            "max_steps": self.scenario["max_steps"],
            "next_command": next_command,
            "applied_penalty": False,
            "total_reward": self.total_reward,
        }

        if self.config["carla_out_path"] \
                and self.num_steps % self.config["save_image_frequency"] == 0\
                and self.num_steps > 15:
            self.take_photo(
                sensor_data=sensor_data,
                class_data=clazzes,
                fused_data=observation
            )

        assert observation is not None, sensor_data
        return observation, py_measurements

    def images_dir(self):
        images_dir = os.path.join(self.config["carla_out_path"], "images")
        os.makedirs(images_dir, exist_ok=True)
        return images_dir

    def episode_dir(self):
        episode_dir = os.path.join(self.images_dir(), "episode_" + str(self.episode_index))
        os.makedirs(episode_dir, exist_ok=True)
        return episode_dir

    def take_photo(self, sensor_data, class_data, fused_data):
        # Work out the output directory and the per-step file name prefix
        episode_dir = self.episode_dir()
        photo_index = self.photo_index
        name_prefix = "episode_{:>04}_step_{:>04}".format(self.episode_index, photo_index)
        self.photo_index += 1

        # Save the image
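        # Note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2;
        # imageio.imwrite is the usual replacement on newer installs.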
        if self.config["save_images_rgb"]:
            rgb_image = sensor_data["CameraRGB"]
            image_path = os.path.join(episode_dir, name_prefix + "_rgb.png")
            scipy.misc.imsave(image_path, rgb_image.data)

        # Save the classes
        if self.config["save_images_class"]:
            classes_path = os.path.join(episode_dir, name_prefix + "_class.png")
            scipy.misc.imsave(classes_path, class_data.data)

        # Save the class images
        if self.config["save_images_fusion"]:
            fused_images = fused_to_rgb(fused_data)
            fused_path = os.path.join(episode_dir, name_prefix + "_fused.png")
            scipy.misc.imsave(fused_path, fused_images.data)

    def images_to_video(self, camera_name):
        # Video directory
        videos_dir = os.path.join(self.config["carla_out_path"], "Videos" + camera_name)
        if not os.path.exists(videos_dir):
            os.makedirs(videos_dir)

        # Build the ffmpeg command: encode the saved frames, then delete them
        video_fps = self.config["fps"] / self.config["save_image_frequency"]
        ffmpeg_cmd = (
            "ffmpeg -loglevel -8 -r {fps} -f image2 -s {x_res}x{y_res} "
            "-start_number 0 -i "
            "{img}_%04d.jpg -vcodec libx264 {vid}.mp4 && rm -f {img}_*.jpg "
        ).format(
            fps=video_fps,
            x_res=self.config["render_x_res"],
            y_res=self.config["render_y_res"],
            vid=os.path.join(videos_dir, self.episode_id),
            img=os.path.join(self.config["carla_out_path"], "Camera" + camera_name, self.episode_id))

        # Execute command
        print("Executing ffmpeg command: ", ffmpeg_cmd)
        try:
            subprocess.call(ffmpeg_cmd, shell=True, timeout=60)
        except subprocess.TimeoutExpired:
            print("ffmpeg timed out after 60 seconds")
        except Exception as ex:
            print("ffmpeg failed: {}".format(ex))
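A minimal driving-loop sketch (not part of the original example): it assumes CarlaEnv and ENV_CONFIG above are importable together with the module's constants, and that config["server_binary"] points at a working CARLA server binary; the random policy below is purely a placeholder.
# Hypothetical usage of the CarlaEnv defined above; only CarlaEnv and ENV_CONFIG come
# from the example, everything else here is illustrative.
env = CarlaEnv(config=ENV_CONFIG)
try:
    obs = env.reset()                       # boots the server on first use
    done = False
    total_reward = 0.0
    while not done:
        action = env.action_space.sample()  # placeholder random policy
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print("Episode reward:", total_reward)
finally:
    env.clear_server_state()                # kill the spawned CARLA server process group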