def build_experiments(self):
        """Create the whole set of experiment objects for the benchmark.

        The pose lists and traffic levels depend on the selected town; one
        experiment is created per (weather, task) combination, all sharing
        a single RGB camera.

        Returns:
            list: Experiment objects, one per weather/task pair.
        """
        # Single RGB camera reused by every experiment.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(200, 88)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Town-specific poses and per-task traffic levels.
        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []
        for weather in self.weathers:
            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                conditions.set(SynchronousMode=True)
                # Attach the shared camera to this experiment's settings.
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Exemple #2
0
    def _reset(self):
        """Start a fresh episode and return the first encoded observation."""
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Pick a scenario and weather, then build the CarlaSettings wrapper
        # (a proxy for the CarlaSettings.ini file) for the new episode.
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])

        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        render_res = (self.config["render_x_res"], self.config["render_y_res"])
        if self.config["use_depth_camera"]:
            depth_cam = Camera("CameraDepth", PostProcessing="Depth")
            depth_cam.set_image_size(*render_res)
            depth_cam.set_position(30, 0, 130)
            settings.add_sensor(depth_cam)

        rgb_cam = Camera("CameraRGB")
        rgb_cam.set_image_size(*render_res)
        rgb_cam.set_position(30, 0, 130)
        settings.add_sensor(rgb_cam)

        # Load the settings and resolve the start/end spawn points.
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Ask the server to begin the episode at the chosen start index.
        # This call blocks until the server is ready.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)
Exemple #3
0
    def __setup_camera_tranforms(self,
                                 name,
                                 postprocessing,
                                 field_of_view=90.0,
                                 image_size=(800, 600),
                                 position=(0.3, 0, 1.3),
                                 rotation_pitch=0,
                                 rotation_roll=0,
                                 rotation_yaw=0):
        """Create a camera and return its intrinsics and transform.

        Args:
            name: sensor name string.
            postprocessing: CARLA post-processing mode string.
            field_of_view: horizontal field of view in degrees.
            image_size: (width, height) in pixels.
            position: (x, y, z) mounting position in metres.
            rotation_pitch: mounting pitch in degrees.
            rotation_roll: mounting roll in degrees.
            rotation_yaw: mounting yaw in degrees.

        Returns:
            Tuple of (3x3 intrinsic matrix, camera unreal transform,
            (image_width, image_height)).
        """
        camera = Camera(name,
                        PostProcessing=postprocessing,
                        FOV=field_of_view,
                        ImageSizeX=image_size[0],
                        ImageSizeY=image_size[1],
                        PositionX=position[0],
                        PositionY=position[1],
                        PositionZ=position[2],
                        RotationPitch=rotation_pitch,
                        RotationRoll=rotation_roll,
                        RotationYaw=rotation_yaw)

        image_width = image_size[0]
        image_height = image_size[1]
        # (Intrinsic) K Matrix: principal point at the image centre, focal
        # length from the pinhole model f = w / (2 * tan(FOV / 2)).
        intrinsic_mat = np.identity(3)
        intrinsic_mat[0][2] = image_width / 2
        intrinsic_mat[1][2] = image_height / 2
        # BUG FIX: the focal length previously hard-coded a 90-degree FOV,
        # ignoring the field_of_view argument (default 90.0 is unchanged).
        intrinsic_mat[0][0] = intrinsic_mat[1][1] = image_width / (
            2.0 * math.tan(field_of_view * math.pi / 360.0))
        return (intrinsic_mat, camera.get_unreal_transform(), (image_width,
                                                               image_height))
Exemple #4
0
def generate_settings_scenario_010():
    """Build scenario 010: base settings plus three default cameras."""
    logging.info('Scenario 010: 3 cameras')
    settings = make_base_settings()
    cameras = (
        Camera('DefaultRGBCamera'),
        Camera('DefaultDepthCamera', PostProcessing='Depth'),
        Camera('DefaultSemSegCamera', PostProcessing='SemanticSegmentation'),
    )
    for cam in cameras:
        settings.add_sensor(cam)
    return settings
 def run(self):
     """Run the client at low quality with a lidar and three cameras."""
     settings = CarlaSettings(QualityLevel='Low')
     sensors = (
         Lidar('DefaultLidar'),
         Camera('DefaultCamera'),
         Camera('DefaultDepth', PostProcessing='Depth'),
         Camera('DefaultSemSeg', PostProcessing='SemanticSegmentation'),
     )
     for sensor in sensors:
         settings.add_sensor(sensor)
     self.run_carla_client(settings, 3, 200)
    def build_experiments(self):
        """Create the full vector of experiment objects.

        The pose pairs and traffic levels depend on the selected town; one
        experiment is produced per (weather, task) combination.
        """
        # Town-dependent parameters: one entry per task, each task holding
        # a list of (start, end) pose pairs.
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # Single RGB camera reused across every experiment.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        experiments_vector = []
        for weather in self.weathers:
            tasks = zip(poses_tasks, vehicles_tasks, pedestrians_tasks)
            for task_id, (poses, vehicles, pedestrians) in enumerate(tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather
                )
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task_id,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
    def build_experiments(self):
        """Create the whole set of experiment objects.

        One experiment is produced for every combination of weather,
        traffic scenario and pose task; all share a single RGB camera.

        Returns:
            list: Experiment objects.
        """
        # Single RGB camera used on every experiment.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Town-specific poses; each traffic scenario fixes a vehicle and
        # pedestrian count that is combined with every pose task.
        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [25]
            pedestrians_tasks = [25]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 25]
            pedestrians_tasks = [0, 25]

        experiments_vector = []
        for weather in self.weathers:
            for scenario in range(len(vehicles_tasks)):
                for iteration in range(len(poses_tasks)):
                    poses = poses_tasks[iteration]
                    vehicles = vehicles_tasks[scenario]
                    pedestrians = pedestrians_tasks[scenario]

                    conditions = CarlaSettings()
                    conditions.set(SendNonPlayerAgentsInfo=True,
                                   NumberOfVehicles=vehicles,
                                   NumberOfPedestrians=pedestrians,
                                   WeatherId=weather)
                    # Attach the shared camera to this experiment.
                    conditions.add_sensor(camera)

                    experiment = Experiment()
                    experiment.set(Conditions=conditions,
                                   Poses=poses,
                                   Task=iteration,
                                   Repetitions=1)
                    experiments_vector.append(experiment)

        return experiments_vector
Exemple #8
0
    def build_experiments(self):
        """Create the set of experiment objects for this suite.

        One experiment is generated per (weather, task) pair, all of them
        sharing a single low-mounted RGB camera.
        """
        # Single RGB camera used on every experiment.
        camera = Camera('rgb')
        camera.set(FOV=90)
        camera.set_image_size(300, 300)
        camera.set_position(0.2, 0.0, 0.85)
        camera.set_rotation(-3.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [0, 0, 0, 15]
        pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []
        for weather in self.weathers:
            for task_id, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task_id],
                    NumberOfPedestrians=pedestrians_tasks[task_id],
                    WeatherId=weather,
                    QualityLevel='Epic'
                )
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task_id,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
    def build_experiments(self):
        """Build the experiment vector for the selected town.

        Each task pairs a list of (start, end) poses with a traffic level;
        every task is repeated under every configured weather id.
        """
        # Town-specific parameters: task index selects poses and traffic.
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # One RGB camera shared by all experiments.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        experiments_vector = []
        for weather in self.weathers:
            for task, poses in enumerate(poses_tasks):
                settings = CarlaSettings()
                settings.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task],
                    NumberOfPedestrians=pedestrians_tasks[task],
                    WeatherId=weather
                )
                settings.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=settings,
                    Poses=poses,
                    Task=task,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Exemple #10
0
    def __init__(self,
                 client,
                 frame_skip=1,
                 cam_width=800,
                 cam_height=600,
                 town_string='Town01'):
        """Set up the driving environment: camera, settings and spaces.

        Args:
            client: connected CARLA client used to load the settings.
            frame_skip: number of simulation frames per environment step.
            cam_width: camera image width in pixels.
            cam_height: camera image height in pixels.
            town_string: town map name, forwarded to the Planner.
        """
        super(StraightDriveEnv, self).__init__()
        self.frame_skip = frame_skip
        self.client = client
        self._planner = Planner(town_string)

        # Forward-facing RGB camera.
        # NOTE(review): set_image_size receives (cam_height, cam_width);
        # if the API expects (width, height) these are swapped — confirm.
        camera0 = Camera('CameraRGB')
        camera0.set(CameraFOV=100)
        camera0.set_image_size(cam_height, cam_width)
        camera0.set_position(200, 0, 140)
        camera0.set_rotation(-15.0, 0, 0)

        # Fixed (start, goal) player-spawn index pairs for episodes.
        self.start_goal_pairs = [[36, 40], [39, 35], [110, 114], [7,
                                                                  3], [0, 4],
                                 [68, 50], [61, 59], [47, 64], [147, 90],
                                 [33, 87], [26, 19], [80, 76], [45, 49],
                                 [55, 44], [29, 107], [95, 104], [84, 34],
                                 [53, 67], [22, 17], [91, 148], [20, 107],
                                 [78, 70], [95, 102], [68, 44], [45, 69]]

        # Empty roads with fixed weather id 1.
        vehicles = 0
        pedestrians = 0
        weather = 1

        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=vehicles,
            NumberOfPedestrians=pedestrians,
            WeatherId=weather,
        )

        settings.randomize_seeds()
        settings.add_sensor(camera0)

        self.scene = self.client.load_settings(settings)

        # Observation: (3,) state vector plus an RGB image; action: (3,).
        img_shape = (cam_width, cam_height, 3)
        self.observation_space = spaces.Tuple(
            (spaces.Box(-np.inf, np.inf, (3, )), spaces.Box(0, 255,
                                                            img_shape)))
        self.action_space = spaces.Box(-np.inf, np.inf, shape=(3, ))

        # Previous-step buffers used for reward/termination deltas.
        self.prev_state = np.array([0., 0., 0.])
        self.prev_collisions = np.array([0., 0., 0.])
        self.prev_intersections = np.array([0., 0.])
Exemple #11
0
    def build_experiments(self):
        """Assemble every experiment for the benchmark.

        Poses come from the town-specific pose methods; traffic levels are
        fixed per task. One experiment per (weather, task) pair.
        """
        # Shared RGB camera attached to each experiment's settings.
        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []
        for weather in self.weathers:
            for task, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles_tasks[task],
                    NumberOfPedestrians=pedestrians_tasks[task],
                    WeatherId=weather
                )
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=task,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
    def build_experiments(self):
        """Create the benchmark's experiment objects.

        Three traffic levels ('empty', 'normal', 'cluttered') are combined
        with each configured weather; two-wheeled vehicles are disabled.
        """
        # Single RGB camera used by every experiment.
        camera = Camera('rgb')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        poses_tasks = self._poses()
        vehicles_tasks = [0, 15, 70]
        pedestrians_tasks = [0, 50, 150]
        task_names = ['empty', 'normal', 'cluttered']

        experiments_vector = []
        for weather in self.weathers:
            for task_id, poses in enumerate(poses_tasks):
                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles_tasks[task_id],
                               NumberOfPedestrians=pedestrians_tasks[task_id],
                               WeatherId=weather)
                conditions.set(DisableTwoWheeledVehicles=True)
                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=task_id,
                               TaskName=task_names[task_id],
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
Exemple #13
0
    def _setup_carla_client(self):
        """Connect to the CARLA server and load the initial settings."""
        client = CarlaClient(self._params['host'], self._params['port'], timeout=None)
        client.connect()

        ### base simulation settings for every episode
        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=self._params['number_of_vehicles'],
            NumberOfPedestrians=self._params['number_of_pedestrians'],
            WeatherId=self._params['weather'])
        settings.randomize_seeds()

        ### one sensor per configured camera; width derives from the fixed
        ### height and aspect ratio
        width = CarlaCollSpeedEnv.CAMERA_HEIGHT * CarlaCollSpeedEnv.WIDTH_TO_HEIGHT_RATIO
        for camera_name in self._params['cameras']:
            camera_params = self._params[camera_name]
            camera = Camera(camera_name,
                            PostProcessing=camera_params['postprocessing'])
            camera.set_image_size(width, CarlaCollSpeedEnv.CAMERA_HEIGHT)
            camera.set_position(*camera_params['position'])
            camera.set(FOV=camera_params['fov'])
            settings.add_sensor(camera)

        ### load settings
        scene = client.load_settings(settings)

        self._carla_client = client
        self._carla_settings = settings
        self._carla_scene = scene
Exemple #14
0
 def _build_camera(self, name, post):
     """Create a camera at the configured resolution with a fixed pose."""
     cam = Camera(name, PostProcessing=post)
     cam.set_image_size(self.config["render_x_res"],
                        self.config["render_y_res"])
     cam.set_position(x=2.4, y=0, z=0.8)
     cam.set_rotation(pitch=-40, roll=0, yaw=0)
     return cam
Exemple #15
0
def generate_settings_scenario_005():
    """Build scenario 005: one semantic-segmentation camera."""
    logging.info(
        'Scenario 005: single camera SemanticSegmentation, no agents info')
    settings = make_base_settings()
    semseg_cam = Camera('DefaultCamera', PostProcessing='SemanticSegmentation')
    settings.add_sensor(semseg_cam)
    return settings
Exemple #16
0
    def __add_camera(self, camera_setup):
        """Adds a camera and a corresponding output stream.

        Args:
            camera_setup: A camera setup object.

        Raises:
            ValueError: If the setup's camera type is not one of the
                supported 'sensor.camera.*' types.
        """
        # Transform from Carla 0.9.x postprocessing strings to Carla 0.8.4.
        if camera_setup.camera_type == 'sensor.camera.rgb':
            postprocessing = 'SceneFinal'
        elif camera_setup.camera_type == 'sensor.camera.depth':
            postprocessing = 'Depth'
        elif camera_setup.camera_type == 'sensor.camera.semantic_segmentation':
            postprocessing = 'SemanticSegmentation'
        else:
            # Previously an unknown type fell through and raised a confusing
            # NameError on `postprocessing`; fail fast with a clear message.
            raise ValueError(
                'Unsupported camera type: {}'.format(camera_setup.camera_type))
        transform = camera_setup.get_transform()
        camera = Camera(name=camera_setup.name,
                        PostProcessing=postprocessing,
                        FOV=camera_setup.fov,
                        ImageSizeX=camera_setup.width,
                        ImageSizeY=camera_setup.height,
                        PositionX=transform.location.x,
                        PositionY=transform.location.y,
                        PositionZ=transform.location.z,
                        RotationPitch=transform.rotation.pitch,
                        RotationRoll=transform.rotation.roll,
                        RotationYaw=transform.rotation.yaw)

        self._settings.add_sensor(camera)
Exemple #17
0
    def __add_camera(self,
                     name,
                     postprocessing,
                     image_size=(800, 600),
                     field_of_view=90.0,
                     position=(0.3, 0, 1.3),
                     rotation_pitch=0,
                     rotation_roll=0,
                     rotation_yaw=0):
        """Adds a camera and a corresponding output stream.

        Args:
            name: A string naming the camera.
            postprocessing: "SceneFinal", "Depth", "SemanticSegmentation".
        """
        # Unpack geometry for readability before constructing the sensor.
        width, height = image_size
        pos_x, pos_y, pos_z = position
        camera = Camera(name,
                        PostProcessing=postprocessing,
                        FOV=field_of_view,
                        ImageSizeX=width,
                        ImageSizeY=height,
                        PositionX=pos_x,
                        PositionY=pos_y,
                        PositionZ=pos_z,
                        RotationPitch=rotation_pitch,
                        RotationRoll=rotation_roll,
                        RotationYaw=rotation_yaw)
        self.settings.add_sensor(camera)
Exemple #18
0
    def _add_sensors(self):
        """Attach the chase-view render camera to the settings."""
        render_cam = Camera('RenderCamera0')
        # Resolution in pixels; position/rotation relative to the car
        # (metres / degrees), looking down from behind.
        render_cam.set_image_size(800, 600)
        render_cam.set_position(-4.30, 0, 2.60)
        render_cam.set_rotation(pitch=-25, yaw=0, roll=0)
        self.settings.add_sensor(render_cam)
 def run(self):
     """Run the client with heavy traffic and the default camera."""
     settings = CarlaSettings()
     settings.set(SynchronousMode=True,
                  SendNonPlayerAgentsInfo=True,
                  NumberOfVehicles=60,
                  NumberOfPedestrians=90)
     default_cam = Camera('DefaultCamera')
     settings.add_sensor(default_cam)
     self.run_carla_client(settings, 3, 100)
Exemple #20
0
	def create_camera(self, camera_name, PostProcessing):
		"""Build a Camera from the shared camera settings plus the given post-processing mode."""
		params = dict(self.camera_settings, PostProcessing=PostProcessing)
		return Camera(camera_name, **params)
Exemple #21
0
def get_default_carla_settings(args):
    """Return the default settings serialized to their ini-style string."""
    settings = CarlaSettings(SynchronousMode=args.synchronous,
                             SendNonPlayerAgentsInfo=False,
                             NumberOfVehicles=20,
                             NumberOfPedestrians=40,
                             WeatherId=1)
    camera = Camera('Camera1')
    settings.add_sensor(camera)
    return str(settings)
Exemple #22
0
 def run(self):
     """Run the client with a default camera plus a wide-FOV depth camera."""
     settings = CarlaSettings()
     settings.add_sensor(Camera('DefaultCamera'))
     depth_cam = Camera('Camera2')
     depth_cam.set(PostProcessing='Depth', CameraFOV=120)
     depth_cam.set_image_size(1924, 1028)
     settings.add_sensor(depth_cam)
     self.run_carla_client(settings, 3, 100)
    def reset(self):
        """Start a new episode with fresh settings and a random spawn point.

        :return: None (TODO: read and return the initial status)
        """
        settings = CarlaSettings()
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=True,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=random.choice([1, 3, 7, 8, 14]),
                     QualityLevel='Epic')

        # CAMERA: single RGB camera attached to the player vehicle.
        camera0 = Camera('CameraRGB', PostProcessing='SceneFinal')
        # Set image resolution in pixels.
        camera0.set_image_size(800, 600)
        # Set its position relative to the car in meters.
        camera0.set_position(0.30, 0, 1.30)
        settings.add_sensor(camera0)

        # Let's add another camera producing ground-truth depth.
        # camera1 = Camera('CameraDepth', PostProcessing='Depth')
        # camera1.set_image_size(800, 600)
        # camera1.set_position(0.30, 0, 1.30)
        # settings.add_sensor(camera1)

        # LIDAR (currently disabled)
        # lidar = Lidar('Lidar32')
        # lidar.set_position(0, 0, 2.50)
        # lidar.set_rotation(0, 0, 0)
        # lidar.set(
        #     Channels=32,
        #     Range=50,
        #     PointsPerSecond=100000,
        #     RotationFrequency=10,
        #     UpperFovLimit=10,
        #     LowerFovLimit=-30)
        # settings.add_sensor(lidar)

        scene = self.carla_client.load_settings(settings)
        # Clear per-episode observation/measurement buffers.
        self.pre_image = None
        self.cur_image = None
        self.pre_measurements = None
        self.cur_measurements = None

        # Pick a random starting spot so training sees varied episodes.
        number_of_player_starts = len(scene.player_start_spots)
        player_start = random.randint(0, max(0, number_of_player_starts - 1))
        # player_start = 140
        self.carla_client.start_episode(player_start)
        print('Starting new episode at %r, %d...' %
              (scene.map_name, player_start))

        # TODO: read and return status after reset
        return
Exemple #24
0
def gen_settings(args):
	"""Build CarlaSettings with no traffic and a single RGB camera."""
	settings = CarlaSettings()
	settings.set(
		SynchronousMode=True,
		SendNonPlayerAgentsInfo=False,
		NumberOfVehicles=0,
		NumberOfPedestrians=0,
		WeatherId=args.weather,
		QualityLevel=args.quality_level)
	settings.randomize_seeds()

	# Camera mounted at (2, 0, 1) metres relative to the car.
	main_camera = Camera("MainCamera")
	main_camera.set_image_size(800, 600)
	main_camera.set_position(2, 0, 1)
	settings.add_sensor(main_camera)
	return settings
Exemple #25
0
def make_carla_settings(args):
    '''
    Creates a CarlaSettings object.
    Configures traffic levels, a randomly chosen weather id and a single
    RGB camera mounted on the player vehicle.
    '''
    settings = CarlaSettings()
    settings.set(SynchronousMode=True,
                 SendNonPlayerAgentsInfo=True,
                 NumberOfVehicles=args.vehicles,
                 NumberOfPedestrians=args.pedestrians,
                 WeatherId=random.choice([1, 3, 7, 8, 14]),
                 QualityLevel=args.quality_level)
    settings.randomize_seeds()
    # Default RGB camera: 800x600, 0.3 m forward / 1.3 m above the car.
    rgb_camera = Camera('CameraRGB')
    rgb_camera.set_image_size(800, 600)
    rgb_camera.set_position(0.30, 0, 1.30)
    settings.add_sensor(rgb_camera)
    return settings
Exemple #26
0
def get_carla_settings(settings_file=None):
    """Return episode settings.

    With no file given, build a CarlaSettings object in code; otherwise
    return the raw contents of the given settings file.
    """
    if settings_file is not None:
        # Alternatively, load the settings verbatim from a file.
        with open(settings_file, 'r') as fp:
            return fp.read()

    # Create a CarlaSettings object (a wrapper around the CarlaSettings.ini
    # file) describing the new episode: no traffic, epic quality.
    settings = CarlaSettings()
    settings.set(
        SynchronousMode=True,
        SendNonPlayerAgentsInfo=False,
        NumberOfVehicles=0,
        NumberOfPedestrians=0,
        # 8-14 are sunset; we want easy first
        WeatherId=random.choice(range(0, 11)),
        QualityLevel='Epic'
    )
    settings.randomize_seeds()

    # Single RGB camera at the configured render resolution, mounted
    # 0.3 m forward and 1.3 m above the car; its images are collected
    # every frame.
    rgb_camera = Camera('CameraRGB')
    rgb_camera.set_image_size(carla_config.render_width,
                              carla_config.render_height)
    rgb_camera.set_position(0.30, 0, 1.30)
    settings.add_sensor(rgb_camera)

    return settings
Exemple #27
0
    def _load_settings(self, settings):
        """Load Carla settings
        Override to customize settings
        :param settings: default settings
        :return: new settings
        """
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=True,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=self.rng.choice([1, 3, 7, 8, 14]),
                     QualityLevel='Low')

        # RGB camera at the configured resolution, mounted 0.3 m forward
        # and 1.3 m above the car.
        rgb_camera = Camera('CameraRGB', PostProcessing='SceneFinal')
        rgb_camera.set_image_size(carla_config.CARLA_IMG_HEIGHT,
                                  carla_config.CARLA_IMG_WIDTH)
        rgb_camera.set_position(0.30, 0, 1.30)
        settings.add_sensor(rgb_camera)
        return settings
Exemple #28
0
def run_carla_client(args):
    """Collect a driving dataset from a running CARLA server.

    Runs ``args.n_episode`` episodes of ``args.n_frame`` frames each,
    assigning every episode a distinct, shuffled start spot (83 spots,
    i.e. Town02) and a fixed CloudyNoon weather (id 2). After a
    100-frame warm-up, every ``args.save_every_n_frames``-th frame from
    the RGB, depth and semantic-segmentation cameras (plus an optional
    LiDAR) is saved to disk when ``args.save_images_to_disk`` is set.
    The chosen weathers and start spots are also written to
    ``args.out_dir`` as text files.
    """
    skip_frames = 100  # 100 # at 10 fps
    number_of_episodes = args.n_episode
    frames_per_episode = args.n_frame

    # n_weather = 14 # weathers starts from 1
    # n_player_start_spots = 152 # Town01
    n_player_start_spots = 83  # Town02

    weathers = number_of_episodes * [2]  # CloudyNoon
    start_spots = list(range(n_player_start_spots))
    random.shuffle(start_spots)
    # Each episode gets its own start spot, so we need at least as many
    # spots as episodes.
    assert number_of_episodes < n_player_start_spots
    start_spots = start_spots[:number_of_episodes]

    # weathers = list(range(number_of_episodes))
    # # random.shuffle(weathers)
    # weathers = [w % 14 + 1 for w in weathers]
    # https://carla.readthedocs.io/en/latest/carla_settings/

    # number_of_episodes = n_weather*n_player_start_spots
    # frames_per_episode = args.n_frame

    # weathers, start_spots = np.meshgrid(list(range(1, n_weather+1)), list(range(n_player_start_spots)))

    # weathers = weathers.flatten()
    # start_spots = start_spots.flatten()

    # Record the episode -> weather/start-spot assignment alongside the data.
    if not os.path.isdir(args.out_dir):
        os.makedirs(args.out_dir)
    np.savetxt(join(args.out_dir, 'weathers.txt'), weathers, fmt='%d')
    np.savetxt(join(args.out_dir, 'start-spots.txt'), start_spots, fmt='%d')
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=weathers[episode],
                    # WeatherId=random.randrange(14) + 1,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('RGB')
                # Set image resolution in pixels.
                camera0.set(FOV=args.cam_fov)
                camera0.set_image_size(args.x_res, args.y_res)
                # Set its position relative to the car in meters.
                camera0.set_position(*args.cam_offset)
                camera0.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('Depth', PostProcessing='Depth')
                camera1.set(FOV=args.cam_fov)
                camera1.set_image_size(args.x_res, args.y_res)
                camera1.set_position(*args.cam_offset)
                camera1.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera1)

                # Ground-truth semantic segmentation from the same viewpoint.
                camera2 = Camera('SegRaw',
                                 PostProcessing='SemanticSegmentation')
                camera2.set(FOV=args.cam_fov)
                camera2.set_image_size(args.x_res, args.y_res)
                camera2.set_position(*args.cam_offset)
                camera2.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randrange(number_of_player_starts)

            # Use the pre-shuffled spot assigned to this episode instead of
            # drawing one at random per episode.
            player_start = start_spots[episode]

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            frame = 0
            # save_frame_idx numbers the saved frames densely from 1,
            # independent of the skip/stride applied below.
            save_frame_idx = 0

            # extra_frames = 0
            # last_control = None
            # last_control_changed = 0
            # while frame < skip_frames + frames_per_episode + extra_frames:

            # Iterate every frame in the episode.
            for frame in range(skip_frames + frames_per_episode):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested: only past the warm-up
                # period and only every args.save_every_n_frames-th frame.
                if args.save_images_to_disk and frame >= skip_frames \
                    and (frame - skip_frames) % args.save_every_n_frames == 0:
                    # if last_control_changed < frame - args.save_every_n_frames:
                    #     extra_frames += args.save_every_n_frames
                    #     last_control_changed += args.save_every_n_frames
                    # else:
                    save_frame_idx += 1
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode + 1, name, save_frame_idx)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # if last_control:
                    #     for v1, v2 in zip(control.values(), last_control.values()):
                    #         if v1 != v2:
                    #             last_control_changed = frame
                    #             break

                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
# Example #29 (0)
def run_carla_client(args):
    """Run the standard CARLA example client: 3 episodes of 300 frames.

    For each episode, builds a CarlaSettings object (or loads one from
    ``args.settings_filepath``), attaches an RGB and a depth camera plus
    an optional LiDAR, starts the episode at a random spot, and then
    drives either with random steering or with the server's autopilot
    (adding steering noise), optionally saving sensor images to disk.
    """
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 3
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
# Example #30 (0)
    def reset_env(self):
        """Start a fresh CARLA episode and return the first observation.

        Resets per-episode bookkeeping, picks a scenario (a single dict
        is used as-is, a list is sampled uniformly), configures the
        simulator and cameras accordingly, starts the episode at the
        scenario's start spot, and returns the encoded initial
        observation.
        """
        # Per-episode bookkeeping.
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        # config["scenarios"] is either one scenario dict or a list of
        # scenario dicts to choose from at random.
        scenarios = self.config["scenarios"]
        if isinstance(scenarios, dict):
            self.scenario = scenarios
        else:
            self.scenario = random.choice(scenarios)
        assert self.scenario["city"] == self.city, (self.scenario, self.city)

        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        # Optional ground-truth depth camera.
        if self.config["use_depth_camera"]:
            depth_cam = Camera("CameraDepth", PostProcessing="Depth")
            depth_cam.set_image_size(
                self.config["render_x_res"], self.config["render_y_res"])
            depth_cam.set_position(0.30, 0, 1.30)
            settings.add_sensor(depth_cam)

        # RGB camera is always attached.
        rgb_cam = Camera("CameraRGB")
        rgb_cam.set_image_size(
            self.config["render_x_res"], self.config["render_y_res"])
        rgb_cam.set_position(0.30, 0, 1.30)
        settings.add_sensor(rgb_cam)

        # Resolve the scenario's start/end spots from the scene returned
        # by the server.
        scene = self.client.load_settings(settings)
        spots = scene.player_start_spots
        self.start_pos = spots[self.scenario["start_pos_id"]]
        self.end_pos = spots[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100]
        print(
            "Start pos {} ({}), end {} ({})".format(
                self.scenario["start_pos_id"], self.start_coord,
                self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)
# Example #31 (0)
def run_carla_client(host, port, autopilot_on, save_images_to_disk,
                     image_filename_format, settings_filepath):
    """Run 3 episodes of 300 frames against a CARLA server.

    Legacy variant of the example client (note the camera positions are
    given in centimeters here, unlike the meter-based variants above).
    Per episode: build settings (or load them from ``settings_filepath``),
    attach RGB and depth cameras, start at a random spot, then drive with
    random steering or the server autopilot plus steering noise,
    optionally saving images via ``image_filename_format``.
    """
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 3
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(SynchronousMode=True,
                             SendNonPlayerAgentsInfo=True,
                             NumberOfVehicles=20,
                             NumberOfPedestrians=40,
                             WeatherId=random.choice([1, 3, 7, 8, 14]))
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in centimeters.
                camera0.set_position(30, 0, 130)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(30, 0, 130)
                settings.add_sensor(camera1)

            else:

                # Alternatively, we can load these settings from a file.
                with open(settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if save_images_to_disk:
                    for name, image in sensor_data.items():
                        image.save_to_disk(
                            image_filename_format.format(episode, name, frame))

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not autopilot_on:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
# Example #32 (0)
def run_carla_client(args):
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 60000
    frames_per_episode = 400

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port, 30) as client:
        print('CarlaClient connected')

        # =============================================================================
        #       Global initialisations
        # =============================================================================
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        K.set_session(sess)

        state_size = {
            'state_2D': (
                64,
                64,
                9,
            ),
            'state_1D': (17, )
        }
        action_size = (5, )

        critic = Critic(sess, state_size, action_size, CRITIC_LR)
        critic.target_train()
        actor = Actor(sess, state_size, action_size, ACTOR_LR)
        actor.target_train()
        memory = ExperienceMemory(100000, False)

        target_update_counter = 0
        target_update_freq = TARGET_UPDATE_BASE_FREQ

        explore_rate = 0.2

        success_counter = 0

        total_t = 0
        t = 0
        #NOTE Ez csak egy próba, eztmég át kell alakítani
        target = {
            'pos': np.array([-3.7, 236.4, 0.9]),
            'ori': np.array([0.00, -1.00, 0.00])
        }

        if args.settings_filepath is None:
            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(SynchronousMode=True,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=0,
                         NumberOfPedestrians=0,
                         WeatherId=random.choice([1]),
                         QualityLevel=args.quality_level)
            #            settings.randomize_seeds()
            #
            #            settings.randomize_seeds()
            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(64, 64)
            # Set its position relative to the car in centimeters.
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)
        else:

            # Alternatively, we can load these settings from a file.
            with open(args.settings_filepath, 'r') as fp:
                settings = fp.read()
        scene = client.load_settings(settings)

        # =============================================================================
        #       EPISODES LOOP
        # =============================================================================
        for episode in range(0, number_of_episodes):
            # Start a new episode.
            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))
            player_start = 0
            total_reward = 0.
            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            #TODO Ezen belül kéne implementálni a tanuló algoritmusunkat

            # =============================================================================
            #           Episodic intitialisations
            # =============================================================================
            collisions = {'car': 0, 'ped': 0, 'other': 0}
            reverse = -1.0
            measurements, sensor_data = client.read_data()
            state = get_state_from_data(measurements, sensor_data, reverse)
            goal = get_goal_from_data(target)
            t = 0
            stand_still_counter = 0
            # =============================================================================
            #           STEPS LOOP
            # =============================================================================
            for frame in range(0, frames_per_episode):
                t = t + 1
                total_t += 1
                target_update_counter += 1
                explore_dev = 0.6 / (1 + total_t / 30000)
                explore_rate = 0.3 / (1 + total_t / 30000)
                # Print some of the measurements.
                #   print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk and False:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode, name, frame)
                        measurement.save_to_disk(filename)

                if state['state_1D'][9] < 5 and t > 50:
                    stand_still_counter += 1
                else:
                    stand_still_counter = 0
                #Calculate the action
                a_pred = actor.model.predict([
                    np.expand_dims(state['state_2D'], 0),
                    np.expand_dims(np.concatenate((state['state_1D'], goal)),
                                   0)
                ])[0]
                #Add exploration noise to action
                a = add_noise(a_pred, explore_dev, explore_rate)
                control = get_control_from_a(a)
                #Sendcontrol to the server
                client.send_control(control)

                #
                # =============================================================================
                #               TRAINING THE NETWORKS
                # =============================================================================
                if memory.num_items > 6000:
                    batch, indeces = memory.sample_experience(MINI_BATCH_SIZE)
                    raw_states = [[e[0]['state_2D'], e[0]['state_1D']]
                                  for e in batch]
                    goals = np.asarray([e[5] for e in batch])
                    states = {
                        'state_2D':
                        np.atleast_2d(np.asarray([e[0]
                                                  for e in raw_states[:]])),
                        'state_1D':
                        np.atleast_2d(
                            np.asarray([
                                np.concatenate([e[1], goals[i]], axis=-1)
                                for i, e in enumerate(raw_states[:])
                            ]))
                    }

                    actions = np.asarray([e[1] for e in batch])
                    rewards = np.asarray([np.sum(e[2])
                                          for e in batch]).reshape(-1, 1)

                    raw_new_states = [[e[3]['state_2D'], e[3]['state_1D']]
                                      for e in batch]
                    new_states = {
                        'state_2D':
                        np.atleast_2d(
                            np.asarray([e[0] for e in raw_new_states[:]])),
                        'state_1D':
                        np.atleast_2d(
                            np.asarray([
                                np.concatenate([e[1], goals[i]], axis=-1)
                                for i, e in enumerate(raw_new_states[:])
                            ]))
                    }

                    overs = np.asarray([e[4] for e in batch]).reshape(-1, 1)

                    best_a_preds = actor.target_model.predict(
                        [new_states['state_2D'], new_states['state_1D']])
                    max_qs = critic.target_model.predict([
                        new_states['state_2D'], new_states['state_1D'],
                        best_a_preds
                    ])

                    ys = rewards + (1 - overs) * GAMMA * max_qs
                    #Train Critic network
                    critic.model.train_on_batch(
                        [states['state_2D'], states['state_1D'], actions], ys)
                    #Train Actor network
                    a_for_grads = actor.model.predict(
                        [states['state_2D'], states['state_1D']])
                    a_grads = critic.gradients(states, a_for_grads)
                    actor.train(states, a_grads)

                    #Train target networks
                    if target_update_counter >= int(target_update_freq):
                        target_update_counter = 0
                        target_update_freq = target_update_freq * TARGET_UPDATE_MULTIPLIER
                        critic.target_train()
                        actor.target_train()
# =============================================================================
#               GET AND STORE OBSERVATIONS
# =============================================================================
#Get next measurements
                measurements, sensor_data = client.read_data()
                new_state = get_state_from_data(measurements, sensor_data,
                                                reverse, state)

                #TODO Calculate reward
                r_goal, success = calculate_goal_reward(
                    np.atleast_2d(new_state['state_1D']), goal)
                r_general, collisions = calculate_general_reward(
                    measurements, collisions)
                over = stand_still_counter > 30 or success
                success_counter += int(bool(success) * 1)
                total_reward += r_goal
                total_reward += r_general
                #Store observation
                if t > 10:
                    experience = pd.DataFrame(
                        [[
                            state, a,
                            np.array([r_goal, r_general]), new_state,
                            bool(over), goal, episode, 0
                        ]],
                        columns=['s', 'a', 'r', "s'", 'over', 'g', 'e', 'p'],
                        copy=True)
                    memory.add_experience(experience)

                #Set the state to the next state
                state = new_state
                if over:
                    break
            sub_goal = deepcopy(state['state_1D'][0:6])
            print(str(episode) + ". Episode###################")
            print("Total reward: " + str(total_reward))
            print("Success counter: " + str(success_counter))
            if (episode % 10 == 0):
                print("############## DEBUG LOG ################")
                print("Memory state: " + str(memory.num_items))
                print("Target update counter: " + str(target_update_counter))
                print("Exploration rate: " + str(explore_rate))
                print("Exploration dev: " + str(explore_dev))
                print("Total timesteps: " + str(total_t))
                print("Average episode length: " + str(total_t /
                                                       (episode + 1)))
                print("#########################################")


# =============================================================================
#           REPLAY FOR SUBGOALS
# =============================================================================
            batch = memory.get_last_episode(t)
            raw_new_states = [[e[3]['state_2D'], e[3]['state_1D']]
                              for e in batch]
            new_states = {
                'state_2D':
                np.atleast_2d(np.asarray([e[0] for e in raw_new_states[:]])),
                'state_1D':
                np.atleast_2d(np.asarray([e[1] for e in raw_new_states[:]]))
            }
            rewards = np.asarray([e[2] for e in batch]).reshape(-1, 2)
            r_subgoal = calculate_goal_reward(new_states['state_1D'],
                                              sub_goal)[0]
            rewards[:, 0] = r_subgoal
            subgoal_batch = [[
                v[0], v[1],
                list(rewards)[i], v[3], v[4], sub_goal, v[6], v[7]
            ] for i, v in enumerate(batch)]
            experiences = pd.DataFrame(
                subgoal_batch,
                columns=['s', 'a', 'r', "s'", 'over', 'g', 'e', 'p'],
                copy=True)
            memory.add_experience(experiences)
Exemple #33
0
def run_carla_client(args):
    """Connect to a CARLA server and record driving data for several episodes.

    For each episode: configure the simulator (from ``args`` or from a
    settings file), pick a player start spot (optionally from an
    intersection-start index file), then step through every frame saving
    camera images, relative odometry, per-agent tracklet text files and a
    rasterized occupancy-grid PNG.

    Args:
        args: parsed command-line namespace. Fields read here:
            ``_episode``, ``_frames``, ``host``, ``port``,
            ``settings_filepath``, ``synchronous_mode``, ``quality_level``,
            ``lidar``, ``intersection_start``, ``intersection_file``,
            ``file_loc_format``, ``out_filename_format``, ``start_time``,
            ``save_images_to_disk``, ``no_misbehaviour``, ``autopilot``.

    Returns:
        None. All output is written to disk under ``args.file_loc_format``.
    """
    # Here we will run args._episode episodes with args._frames frames each.
    number_of_episodes = args._episode
    frames_per_episode = args._frames

    # create the carla client
    with make_carla_client(args.host, args.port, timeout=10000) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):

            if args.settings_filepath is None:

                # Build the episode settings in code: synchronous mode per
                # args, dense traffic (200 vehicles / 100 pedestrians) and a
                # random weather preset out of five fixed ids.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=args.synchronous_mode,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=200,
                    NumberOfPedestrians=100,
                    WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(1920, 640)
                # Set its position relative to the car in meters.
                camera0.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera0)

                # Depth camera mounted at the exact same pose/resolution as
                # the RGB camera so the two streams are pixel-aligned.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(1920, 640)
                camera1.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera1)

                #slows down the simulation too much by processing segmentation before saving
                #camera2 = Camera('CameraSegmentation', PostProcessing='SemanticSegmentation')
                #camera2.set_image_size(1920, 640)
                #camera2.set_position(2.00, 0, 1.30)
                #settings.add_sensor(camera2)

                # Optional roof-mounted 32-channel lidar.
                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:
                # Alternatively, we can load these settings from a file.
                # Note: in this branch `settings` is the raw .ini text, which
                # load_settings() below also accepts.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            scene = client.load_settings(settings)

            # Choose one player start at random.
            #if intersection flag is set start near and facing an intersection
            if args.intersection_start:
                # The file is expected to hold whitespace-separated start-spot
                # indices; one is picked at random.
                text_file = open(args.intersection_file, "r")
                player_intersection_start = text_file.read().split(' ')
                player_start = int(player_intersection_start[random.randint(0, max(0, len(player_intersection_start) - 1))])
                text_file.close()
                print("Player start index="+str(player_start))
            else:
                number_of_player_starts = len(scene.player_start_spots)
                player_start = random.randint(0, max(0, number_of_player_starts - 1))

            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            # create folders for current episode
            file_loc = args.file_loc_format.format(episode)
            if not os.path.exists(file_loc):
                os.makedirs(file_loc)

            # Per-frame agent tracklet text files go here.
            file_loc_tracklets = file_loc + "/tracklets/"
            if not os.path.exists(file_loc_tracklets):
                os.makedirs(file_loc_tracklets)

            # Rasterized occupancy-grid PNGs go here.
            file_loc_grid = file_loc + "/gridmap/"
            if not os.path.exists(file_loc_grid):
                os.makedirs(file_loc_grid)

            print('Data saved in %r' % file_loc)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                #discard episode if misbehaviour flag is set and a form of collision is detected
                if args.no_misbehaviour:
                    player_measurements = measurements.player_measurements
                    col_cars=player_measurements.collision_vehicles
                    col_ped=player_measurements.collision_pedestrians
                    col_other=player_measurements.collision_other
                    if col_cars + col_ped + col_other > 0:
                        print("MISBEHAVIOUR DETECTED! Discarding Episode... \n")
                        break

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested. Skip first couple of images due to setup time.
                # NOTE: frames 0-19 always fall through to the `else` branch
                # below, which initializes yaw_shift / yaw_old / imrotate /
                # shift_x / shift_y / prev_time used here from frame 20 on.
                if args.save_images_to_disk and frame > 19:
                    player_measurements = measurements.player_measurements

                    # process player odometry
                    # Yaw relative to the first-frame heading, wrapped into
                    # [-180, 180) via the ((x - 180) % 360) - 180 identity.
                    yaw = ((player_measurements.transform.rotation.yaw - yaw_shift - 180) % 360) - 180

                    #calculate yaw_rate
                    game_time = np.int64(measurements.game_timestamp)
                    time_diff = (game_time - prev_time) / 1000 # ms -> sec
                    prev_time = game_time
                    if time_diff == 0:
                        # Guard against a repeated timestamp (division by zero).
                        yaw_rate = 0
                    else:
                        # Smallest signed angular difference between yaw and
                        # yaw_old (deg), divided by elapsed time -> deg/s.
                        yaw_rate = (180 - abs(abs(yaw - yaw_old) - 180))/time_diff * np.sign(yaw-yaw_old)

                    yaw_old = yaw

                    #write odometry data to .txt
                    # Position is written relative to the first-frame location
                    # (shift_x/shift_y); yaw and yaw_rate are sign-flipped —
                    # presumably to convert handedness of the coordinate
                    # frame (TODO confirm against the consumer of this file).
                    with open(file_loc+"odometry_t_mus-x_m-y_m-yaw_deg-yr_degs-v_ms.txt", "a") as text_file:
                        text_file.write("%d %f %f %f %f %f\n" % \
                           (round(args.start_time+measurements.game_timestamp),
                            player_measurements.transform.location.x - shift_x,
                            player_measurements.transform.location.y - shift_y,
                            -yaw,
                            -yaw_rate,
                            player_measurements.forward_speed))

                    # need modified saver to save converted segmentation
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        if name == 'CameraSegmentation':
                            measurement.save_to_disk_converted(filename)
                        else:
                            measurement.save_to_disk(filename)

                    # One tracklet file per frame, keyed by wall-clock-ish
                    # timestamp (args.start_time + game_timestamp).
                    with open(file_loc_tracklets+str(round(args.start_time+measurements.game_timestamp))+".txt", "w") as text_file:
                        # Grayscale canvas at 6x the final resolution,
                        # background value 127; agent footprints are drawn as
                        # black (0) polygons, then the image is downscaled.
                        im = Image.new('L', (256*6, 256*6), (127))
                        shift = 256*6/2
                        draw = ImageDraw.Draw(im)
                        # create occupancy map and save participant info in tracklet txt file similar to KITTI
                        for agent in measurements.non_player_agents:
                            if agent.HasField('vehicle') or agent.HasField('pedestrian'):
                                participant = agent.vehicle if agent.HasField('vehicle') else agent.pedestrian
                                # Heading angle from the orientation vector.
                                angle = cart2pol(participant.transform.orientation.x, participant.transform.orientation.y)
                                text_file.write("%d %f %f %f %f %f\n" % \
                                 (agent.id,
                                  participant.transform.location.x,
                                  participant.transform.location.y,
                                  angle,
                                  participant.bounding_box.extent.x,
                                  participant.bounding_box.extent.y))
                                # Footprint polygon: agent frame -> world ->
                                # player frame -> image pixels (25 px per
                                # world unit, centered via `shift`).
                                polygon = agent2world(participant, angle)
                                polygon = world2player(polygon, math.radians(-yaw), player_measurements.transform)
                                polygon = player2image(polygon, shift, multiplier=25)
                                polygon = [tuple(row) for row in polygon.T]

                                draw.polygon(polygon, 0, 0)
                        # Downscale with antialiasing and rotate so the map is
                        # aligned to the player's initial heading (imrotate).
                        im = im.resize((256,256), Image.ANTIALIAS)
                        im = im.rotate(imrotate)
                        im.save(file_loc_grid + 'gridmap_'+ str(round(args.start_time+measurements.game_timestamp)) +'_occupancy' + '.png')

                else:
                    # get first values
                    # Warm-up path (frame <= 19, or saving disabled): record
                    # the reference heading, grid rotation, origin offset and
                    # timestamp that the saving branch above depends on.
                    yaw_shift = measurements.player_measurements.transform.rotation.yaw
                    yaw_old = ((yaw_shift - 180) % 360) - 180
                    imrotate = round(yaw_old)+90
                    shift_x = measurements.player_measurements.transform.location.x
                    shift_y = measurements.player_measurements.transform.location.y
                    prev_time = np.int64(measurements.game_timestamp)


                if not args.autopilot:
                    # Manual mode: random steering with constant half throttle.
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)
                else:
                    # Autopilot mode with slight steering noise added so the
                    # recorded trajectories are not perfectly clean.
                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.01, 0.01)
                    client.send_control(control)
Exemple #34
0
def run_carla_client( args):
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 150
    frames_per_episode = 500

    # for start_i in range(150):
    #     if start_i%4==0:
    #         output_folder = 'Packages/CARLA_0.8.2/PythonClient/new_data-viz/test_' + str(start_i)
    #         if not os.path.exists(output_folder):
    #             os.mkdir(output_folder)
    #             print( "make "+str(output_folder))
    # ./CarlaUE4.sh -carla-server  -benchmark -fps=17 -windowed
    # carla-server "/usr/local/carla/Unreal/CarlaUE4/CarlaUE4.uproject" /usr/local/carla/Maps/Town03 -benchmark -fps=10 -windowed


    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:

        print('CarlaClient connected')
        global episode_nbr
        print (episode_nbr)
        for episode in range(0,number_of_episodes):
            if episode % 1 == 0:
                output_folder = 'Datasets/carla-sync/train/test_' + str(episode)
                if not os.path.exists(output_folder+"/cameras.p"):
                    # Start a new episode.
                    episode_nbr=episode
                    frame_step = 1  # Save one image every 100 frames
                    pointcloud_step=50
                    image_size = [800, 600]
                    camera_local_pos = [0.3, 0.0, 1.3]  # [X, Y, Z]
                    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
                    fov = 70
                    # Create a CarlaSettings object. This object is a wrapper around
                    # the CarlaSettings.ini file. Here we set the configuration we
                    # want for the new episode.
                    settings = CarlaSettings()
                    settings.set(
                        SynchronousMode=True,
                        SendNonPlayerAgentsInfo=True,
                        NumberOfVehicles=50,
                        NumberOfPedestrians=200,
                        WeatherId=random.choice([1, 3, 7, 8, 14]),
                        QualityLevel=args.quality_level)
                    settings.randomize_seeds()

                    # Now we want to add a couple of cameras to the player vehicle.
                    # We will collect the images produced by these cameras every
                    # frame.


                    camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
                    camera1.set_image_size(*image_size)
                    camera1.set_position(*camera_local_pos)
                    camera1.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
                    camera2.set_image_size(*image_size)
                    camera2.set_position(*camera_local_pos)
                    camera2.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera2)

                    camera3 = Camera('CameraSeg', PostProcessing='SemanticSegmentation', FOV=fov)
                    camera3.set_image_size(*image_size)
                    camera3.set_position(*camera_local_pos)
                    camera3.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera3)



                    # Now we load these settings into the server. The server replies
                    # with a scene description containing the available start spots for
                    # the player. Here we can provide a CarlaSettings object or a
                    # CarlaSettings.ini file as string.
                    scene = client.load_settings(settings)

                    # Choose one player start at random.
                    number_of_player_starts = len(scene.player_start_spots)
                    player_start = episode#random.randint(0, max(0, number_of_player_starts - 1))

                    output_folder = 'Datasets/carla-sync/train/test_' + str(episode)
                    # Notify the server that we want to start the episode at the
                    # player_start index. This function blocks until the server is ready
                    # to start the episode.
                    print('Starting new episode...')
                    client.start_episode(player_start)


                    cameras_dict = {}
                    pedestrians_dict = {}
                    cars_dict = {}
                    # Compute the camera transform matrix
                    camera_to_car_transform = camera2.get_unreal_transform()
                    # (Intrinsic) (3, 3) K Matrix
                    K = np.identity(3)
                    K[0, 2] = image_size[0] / 2.0
                    K[1, 2] = image_size[1] / 2.0
                    K[0, 0] = K[1, 1] = image_size[0] / (2.0 * np.tan(fov * np.pi / 360.0))
                    with open(output_folder + '/camera_intrinsics.p', 'w') as camfile:
                        pickle.dump(K, camfile)


                    # Iterate every frame in the episode.
                    for frame in range(0, frames_per_episode):

                        # Read the data produced by the server this frame.
                        measurements, sensor_data = client.read_data()

                        # Print some of the measurements.
                        #print_measurements(measurements)
                        if not frame % frame_step:
                            # Save the images to disk if requested.

                            for name, measurement in sensor_data.items():
                                filename = args.out_filename_format.format(episode, name, frame)
                                print (filename)
                                measurement.save_to_disk(filename)

                            # We can access the encoded data of a given image as numpy
                            # array using its "data" property. For instance, to get the
                            # depth value (normalized) at pixel X, Y
                            #
                            #     depth_array = sensor_data['CameraDepth'].data
                            #     value_at_pixel = depth_array[Y, X]
                            #

                            # Now we have to send the instructions to control the vehicle.
                            # If we are in synchronous mode the server will pause the
                            # simulation until we send this control.

                            # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                            image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                            labels=labels_to_array(sensor_data['CameraSeg'])[:,:,np.newaxis]

                            image_seg = np.tile(labels, (1, 1, 3))
                            depth_array = sensor_data['CameraDepth'].data*1000


                            # 2d to (camera) local 3d
                            # We use the image_RGB to colorize each 3D point, this is optional.
                            # "max_depth" is used to keep only the points that are near to the
                            # camera, meaning 1.0 the farest points (sky)
                            if not frame % pointcloud_step:
                                point_cloud = depth_to_local_point_cloud(
                                    sensor_data['CameraDepth'],
                                    image_RGB,
                                    max_depth=args.far
                                )

                                point_cloud_seg = depth_to_local_point_cloud(
                                    sensor_data['CameraDepth'],
                                    segmentation=image_seg,
                                    max_depth=args.far
                                )

                            # (Camera) local 3d to world 3d.
                            # Get the transform from the player protobuf transformation.
                            world_transform = Transform(
                                measurements.player_measurements.transform
                            )

                            # Compute the final transformation matrix.
                            car_to_world_transform = world_transform * camera_to_car_transform

                            # Car to World transformation given the 3D points and the
                            # transformation matrix.
                            point_cloud.apply_transform(car_to_world_transform)
                            point_cloud_seg.apply_transform(car_to_world_transform)

                            Rt = car_to_world_transform.matrix
                            Rt_inv = car_to_world_transform.inverse().matrix
                            # R_inv=world_transform.inverse().matrix
                            cameras_dict[frame] = {}
                            cameras_dict[frame]['inverse_rotation'] = Rt_inv[:]
                            cameras_dict[frame]['rotation'] = Rt[:]
                            cameras_dict[frame]['translation'] = Rt_inv[0:3, 3]
                            cameras_dict[frame]['camera_to_car'] = camera_to_car_transform.matrix

                            # Get non-player info
                            vehicles = {}
                            pedestrians = {}
                            for agent in measurements.non_player_agents:
                                # check if the agent is a vehicle.
                                if agent.HasField('vehicle'):
                                    pos = agent.vehicle.transform.location
                                    pos_vector = np.array([[pos.x], [pos.y], [pos.z], [1.0]])

                                    trnasformed_3d_pos = np.dot(Rt_inv, pos_vector)
                                    pos2d = np.dot(K, trnasformed_3d_pos[:3])

                                    # Normalize the point
                                    norm_pos2d = np.array([
                                        pos2d[0] / pos2d[2],
                                        pos2d[1] / pos2d[2],
                                        pos2d[2]])

                                    # Now, pos2d contains the [x, y, d] values of the image in pixels (where d is the depth)
                                    # You can use the depth to know the points that are in front of the camera (positive depth).

                                    x_2d = image_size[0] - norm_pos2d[0]
                                    y_2d = image_size[1] - norm_pos2d[1]
                                    vehicles[agent.id] = {}
                                    vehicles[agent.id]['transform'] = [agent.vehicle.transform.location.x,
                                                                       agent.vehicle.transform.location.y,
                                                                       agent.vehicle.transform.location.z]
                                    vehicles[agent.id][
                                        'bounding_box.transform'] = agent.vehicle.bounding_box.transform.location.z

                                    vehicles[agent.id]['yaw'] = agent.vehicle.transform.rotation.yaw
                                    vehicles[agent.id]['bounding_box'] = [agent.vehicle.bounding_box.extent.x,
                                                                          agent.vehicle.bounding_box.extent.y,
                                                                          agent.vehicle.bounding_box.extent.z]
                                    vehicle_transform = Transform(agent.vehicle.bounding_box.transform)
                                    pos = agent.vehicle.transform.location

                                    bbox3d = agent.vehicle.bounding_box.extent

                                    # Compute the 3D bounding boxes
                                    # f contains the 4 points that corresponds to the bottom
                                    f = np.array([[pos.x + bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z]])

                                    f_rotated = vehicle_transform.transform_points(f)
                                    f_2D_rotated = []
                                    vehicles[agent.id]['bounding_box_coord'] = f_rotated

                                    for i in range(f.shape[0]):
                                        point = np.array([[f_rotated[i, 0]], [f_rotated[i, 1]], [f_rotated[i, 2]], [1]])
                                        transformed_2d_pos = np.dot(Rt_inv, point)
                                        pos2d = np.dot(K, transformed_2d_pos[:3])
                                        norm_pos2d = np.array([
                                            pos2d[0] / pos2d[2],
                                            pos2d[1] / pos2d[2],
                                            pos2d[2]])
                                        # print([image_size[0] - (pos2d[0] / pos2d[2]), image_size[1] - (pos2d[1] / pos2d[2])])
                                        f_2D_rotated.append(
                                            np.array([image_size[0] - norm_pos2d[0], image_size[1] - norm_pos2d[1]]))
                                    vehicles[agent.id]['bounding_box_2D'] = f_2D_rotated


                                elif agent.HasField('pedestrian'):
                                    pedestrians[agent.id] = {}
                                    pedestrians[agent.id]['transform'] = [agent.pedestrian.transform.location.x,
                                                                          agent.pedestrian.transform.location.y,
                                                                          agent.pedestrian.transform.location.z]
                                    pedestrians[agent.id]['yaw'] = agent.pedestrian.transform.rotation.yaw
                                    pedestrians[agent.id]['bounding_box'] = [agent.pedestrian.bounding_box.extent.x,
                                                                             agent.pedestrian.bounding_box.extent.y,
                                                                             agent.pedestrian.bounding_box.extent.z]
                                    # get the needed transformations
                                    # remember to explicitly make it Transform() so you can use transform_points()
                                    pedestrian_transform = Transform(agent.pedestrian.transform)
                                    bbox_transform = Transform(agent.pedestrian.bounding_box.transform)

                                    # get the box extent
                                    ext = agent.pedestrian.bounding_box.extent
                                    # 8 bounding box vertices relative to (0,0,0)
                                    bbox = np.array([
                                        [  ext.x,   ext.y,   ext.z],
                                        [- ext.x,   ext.y,   ext.z],
                                        [  ext.x, - ext.y,   ext.z],
                                        [- ext.x, - ext.y,   ext.z],
                                        [  ext.x,   ext.y, - ext.z],
                                        [- ext.x,   ext.y, - ext.z],
                                        [  ext.x, - ext.y, - ext.z],
                                        [- ext.x, - ext.y, - ext.z]
                                    ])

                                    # transform the vertices respect to the bounding box transform
                                    bbox = bbox_transform.transform_points(bbox)

                                    # the bounding box transform is respect to the pedestrian transform
                                    # so let's transform the points relative to it's transform
                                    bbox = pedestrian_transform.transform_points(bbox)

                                    # pedestrian's transform is relative to the world, so now,
                                    # bbox contains the 3D bounding box vertices relative to the world
                                    pedestrians[agent.id]['bounding_box_coord'] =copy.deepcopy(bbox)

                                    # Additionally, you can print these vertices to check that is working
                                    f_2D_rotated=[]
                                    ys=[]
                                    xs=[]
                                    zs=[]
                                    for vertex in bbox:
                                        pos_vector = np.array([
                                            [vertex[0,0]],  # [[X,
                                            [vertex[0,1]],  #   Y,
                                            [vertex[0,2]],  #   Z,
                                            [1.0]           #   1.0]]
                                        ])

                                        # transform the points to camera
                                        transformed_3d_pos =np.dot(Rt_inv, pos_vector)# np.dot(inv(self._extrinsic.matrix), pos_vector)
                                        zs.append(transformed_3d_pos[2])
                                        # transform the points to 2D
                                        pos2d =np.dot(K, transformed_3d_pos[:3]) #np.dot(self._intrinsic, transformed_3d_pos[:3])

                                        # normalize the 2D points
                                        pos2d = np.array([
                                            pos2d[0] / pos2d[2],
                                            pos2d[1] / pos2d[2],
                                            pos2d[2]
                                        ])

                                        # print the points in the screen
                                        if pos2d[2] > 0: # if the point is in front of the camera
                                            x_2d = image_size[0]-pos2d[0]#WINDOW_WIDTH - pos2d[0]
                                            y_2d = image_size[1]-pos2d[1]#WINDOW_HEIGHT - pos2d[1]
                                            ys.append(y_2d)
                                            xs.append(x_2d)
                                            f_2D_rotated.append( (y_2d, x_2d))
                                    if len(xs)>1:
                                        bbox=[[int(min(xs)), int(max(xs))],[int(min(ys)), int(max(ys))]]
                                        clipped_seg=labels[bbox[1][0]:bbox[1][1],bbox[0][0]:bbox[0][1]]
                                        recounted = Counter(clipped_seg.flatten())


                                        if 4 in recounted.keys() and recounted[4]>0.1*len(clipped_seg.flatten()):
                                            clipped_depth=depth_array[bbox[1][0]:bbox[1][1],bbox[0][0]:bbox[0][1]]
                                            #print (clipped_depth.shape)
                                            people_indx=np.where(clipped_seg==4)
                                            masked_depth=[]
                                            for people in zip(people_indx[0],people_indx[1] ):
                                                #print(people)
                                                masked_depth.append(clipped_depth[people])
                                            #masked_depth=clipped_depth[np.where(clipped_seg==4)]
                                            #print (masked_depth)
                                            #print ("Depth "+ str(min(zs))+" "+ str(max(zs)))
                                            #recounted = Counter(masked_depth)
                                            #print(recounted)
                                            avg_depth=np.mean(masked_depth)
                                            if avg_depth<700 and avg_depth>=min(zs)-10 and avg_depth<= max(zs)+10:
                                                #print("Correct depth")
                                                pedestrians[agent.id]['bounding_box_2D'] = f_2D_rotated
                                                pedestrians[agent.id]['bounding_box_2D_size']=recounted[4]
                                                pedestrians[agent.id]['bounding_box_2D_avg_depth']=avg_depth
                                                pedestrians[agent.id]['bounding_box_2D_depths']=zs
                                                #print ( pedestrians[agent.id].keys())
                                            #else:
                                                # print(recounted)
                                                # print ("Depth "+ str(min(zs))+" "+ str(max(zs)))


                                    #if sum(norm(depth_array-np.mean(zs))<1.0):


                                    # pedestrians[agent.id] = {}
                                    # pedestrians[agent.id]['transform'] = [agent.pedestrian.transform.location.x,
                                    #                                       agent.pedestrian.transform.location.y,
                                    #                                       agent.pedestrian.transform.location.z]
                                    # pedestrians[agent.id]['yaw'] = agent.pedestrian.transform.rotation.yaw
                                    # pedestrians[agent.id]['bounding_box'] = [agent.pedestrian.bounding_box.extent.x,
                                    #                                          agent.pedestrian.bounding_box.extent.y,
                                    #                                          agent.pedestrian.bounding_box.extent.z]
                                    # vehicle_transform = Transform(agent.pedestrian.bounding_box.transform)
                                    # pos = agent.pedestrian.transform.location
                                    #
                                    # bbox3d = agent.pedestrian.bounding_box.extent
                                    #
                                    # # Compute the 3D bounding boxes
                                    # # f contains the 4 points that corresponds to the bottom
                                    # f = np.array([[pos.x + bbox3d.x, pos.y - bbox3d.y, pos.z- bbox3d.z ],
                                    #               [pos.x + bbox3d.x, pos.y + bbox3d.y, pos.z- bbox3d.z ],
                                    #               [pos.x - bbox3d.x, pos.y + bbox3d.y, pos.z- bbox3d.z ],
                                    #               [pos.x - bbox3d.x, pos.y - bbox3d.y, pos.z- bbox3d.z ],
                                    #               [pos.x + bbox3d.x, pos.y - bbox3d.y, pos.z + bbox3d.z],
                                    #               [pos.x + bbox3d.x, pos.y + bbox3d.y, pos.z + bbox3d.z],
                                    #               [pos.x - bbox3d.x, pos.y + bbox3d.y, pos.z + bbox3d.z],
                                    #               [pos.x - bbox3d.x, pos.y - bbox3d.y, pos.z + bbox3d.z]])
                                    #
                                    # f_rotated = pedestrian_transform.transform_points(f)
                                    # pedestrians[agent.id]['bounding_box_coord'] = f_rotated
                                    # f_2D_rotated = []
                                    #
                                    # for i in range(f.shape[0]):
                                    #     point = np.array([[f_rotated[i, 0]], [f_rotated[i, 1]], [f_rotated[i, 2]], [1]])
                                    #     transformed_2d_pos = np.dot(Rt_inv, point)
                                    #     pos2d = np.dot(K, transformed_2d_pos[:3])
                                    #     norm_pos2d = np.array([
                                    #         pos2d[0] / pos2d[2],
                                    #         pos2d[1] / pos2d[2],
                                    #         pos2d[2]])
                                    #     f_2D_rotated.append([image_size[0] - norm_pos2d[0], image_size[1] - norm_pos2d[1]])
                                    # pedestrians[agent.id]['bounding_box_2D'] = f_2D_rotated

                            cars_dict[frame] = vehicles

                            pedestrians_dict[frame] = pedestrians
                            #print("End of Episode")
                            #print(len(pedestrians_dict[frame]))

                            # Save PLY to disk
                            # This generates the PLY string with the 3D points and the RGB colors
                            # for each row of the file.
                            if not frame % pointcloud_step:
                                point_cloud.save_to_disk(os.path.join(
                                    output_folder, '{:0>5}.ply'.format(frame))
                                )
                                point_cloud_seg.save_to_disk(os.path.join(
                                    output_folder, '{:0>5}_seg.ply'.format(frame))
                                )

                        if not args.autopilot:

                            client.send_control(
                                hand_brake=True)

                        else:

                            # Together with the measurements, the server has sent the
                            # control that the in-game autopilot would do this frame. We
                            # can enable autopilot by sending back this control to the
                            # server. We can modify it if wanted, here for instance we
                            # will add some noise to the steer.

                            control = measurements.player_measurements.autopilot_control
                            control.steer += random.uniform(-0.1, 0.1)
                            client.send_control(control)
                    print ("Start pickle save")
                    with open(output_folder + '/cameras.p', 'w') as camerafile:
                        pickle.dump(cameras_dict, camerafile)
                    with open(output_folder + '/people.p', 'w') as peoplefile:
                        pickle.dump(pedestrians_dict, peoplefile)
                    with open(output_folder + '/cars.p', 'w') as carfile:
                        pickle.dump(cars_dict, carfile)
                    print ("Episode done")
Exemple #35
0
def run_carla_client(host, port, far):
    """Run one autopilot episode, periodically saving world-space point clouds.

    Connects to the CARLA server at host:port, attaches a depth camera and
    an RGB camera (same pose) to the player vehicle, and every
    ``save_interval`` frames converts the depth image into a colored 3D
    point cloud, transforms it into world coordinates and writes it to
    disk as a PLY file.

    Args:
        host: CARLA server hostname.
        port: CARLA server port.
        far: maximum normalized depth to keep when building the point
            cloud (1.0 keeps the farthest points, i.e. the sky).
    """
    total_frames = 3000          # length of the single episode
    save_interval = 100          # write one point cloud every N frames
    out_dir = '_out'
    resolution = [800, 600]
    cam_pos = [0.3, 0.0, 1.3]    # [X, Y, Z] relative to the car
    cam_rot = [0, 0, 0]          # [pitch(Y), yaw(Z), roll(X)]
    field_of_view = 70

    # Connect with the server; the context manager cleans up on exit.
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Episode settings: synchronous so the server waits for our control.
        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=20,
            NumberOfPedestrians=40,
            WeatherId=random.choice([1, 3, 7, 8, 14]))
        settings.randomize_seeds()

        # Depth camera: the source of 3D geometry.
        depth_cam = Camera('CameraDepth', PostProcessing='Depth', FOV=field_of_view)
        depth_cam.set_image_size(*resolution)
        depth_cam.set_position(*cam_pos)
        depth_cam.set_rotation(*cam_rot)
        settings.add_sensor(depth_cam)

        # RGB camera at the identical pose: used only to colorize the points.
        rgb_cam = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=field_of_view)
        rgb_cam.set_image_size(*resolution)
        rgb_cam.set_position(*cam_pos)
        rgb_cam.set_rotation(*cam_rot)
        settings.add_sensor(rgb_cam)

        client.load_settings(settings)

        # Start at location index id '0'.
        client.start_episode(0)

        # Camera-to-car transform matrix; constant for the whole episode.
        camera_to_car_transform = rgb_cam.get_unreal_transform()

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, total_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one point cloud every 'save_interval' frames.
            if frame % save_interval == 0:
                timer = StopWatch()  # time the transformation pipeline

                # RGB image [[[r,g,b],..],..] used to color each 3D point.
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                # 2D depth -> (camera) local 3D. "max_depth" keeps only
                # points nearer than that normalized depth (1.0 == sky).
                point_cloud = depth_to_local_point_cloud(
                    sensor_data['CameraDepth'],
                    image_RGB,
                    max_depth=far
                )

                # Player transform (car -> world) from the protobuf data.
                world_transform = Transform(
                    measurements.player_measurements.transform
                )

                # Compose camera -> car -> world and apply it to the points.
                car_to_world_transform = world_transform * camera_to_car_transform
                point_cloud.apply_transform(car_to_world_transform)

                timer.stop()

                # Persist the world-space point cloud as a PLY file.
                point_cloud.save_to_disk(os.path.join(
                    out_dir, '{:0>5}.ply'.format(frame))
                )

                print_message(timer.milliseconds(), len(point_cloud), frame)

            # Echo the autopilot control back so the synchronous server
            # advances to the next frame.
            client.send_control(
                measurements.player_measurements.autopilot_control
            )
Exemple #36
0
def run_carla_client(args):
    """Collect CARLA training data into HDF5 files.

    Runs ``number_of_episodes`` episodes; each episode records
    ``cut_per_episode`` HDF5 files ("cuts") of ``frames_per_cut`` frames.
    Every frame stores the RGB / semantic-segmentation / lidar sensor
    data and the measurement targets produced by ``record_train_data``.

    Args:
        args: parsed command-line namespace; reads ``host``, ``port``,
            ``quality_level`` and ``autopilot``.
    """
    number_of_episodes = 5
    cut_per_episode = 40    # HDF5 files written per episode
    frames_per_cut = 200    # frames recorded into each file

    # We assume the CARLA server is already waiting for a client to connect
    # at host:port. The `make_carla_client` context manager creates the
    # client, starts the connection, throws if something goes wrong, and
    # guarantees the connection is cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        for episode in range(0, number_of_episodes):
            print("input any key to continue...")
            input()  # pause until the operator is ready to record

            # Each episode dir stores one set of train data; create it if
            # it does not exist yet.
            pathdir = '/home/kadn/dataTrain/episode_{:0>3}'.format(episode)
            mkdir(pathdir)

            # CarlaSettings wraps CarlaSettings.ini. Synchronous mode makes
            # the server pause each frame until we send a control message.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=20,
                NumberOfPedestrians=40,
                # WeatherId=random.choice([1, 3, 7, 8, 14]),
                WeatherId=1,
                QualityLevel=args.quality_level)
            settings.randomize_seeds()

            # Sensors collected every frame: RGB camera...
            camera0 = Camera('CameraRGB')
            camera0.set_image_size(88, 200)
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)

            # ...ground-truth depth (available to the recorder)...
            camera1 = Camera('CameraDepth', PostProcessing='Depth')
            camera1.set_image_size(200, 88)
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

            # ...semantic segmentation...
            camera2 = Camera('CameraSemSeg', PostProcessing='SemanticSegmentation')
            camera2.set_image_size(88, 200)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera2)

            # ...and a lidar.
            lidar = Lidar('Lidar32')
            lidar.set_position(0, 0, 2.50)
            lidar.set_rotation(0, 0, 0)
            lidar.set(
                Channels=0,
                Range=30,
                PointsPerSecond=200000,
                RotationFrequency=10,
                UpperFovLimit=0,
                LowerFovLimit=0)
            settings.add_sensor(lidar)

            # Load the settings into the server. The reply is a scene
            # description containing the available player start spots.
            scene = client.load_settings(settings)

            # Fixed start spot (randomized variant kept for reference):
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))
            player_start = 1

            # Notify the server that we want to start the episode at the
            # player_start index. Blocks until the server is ready.
            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            # FIX: the original loop reused `cut_per_episode` as its loop
            # variable, clobbering the episode-wide cut count so every later
            # episode recorded fewer and fewer cuts. Use a distinct index.
            for _cut in range(0, cut_per_episode):
                num = fileCounter(pathdir)
                filename = "data_{:0>6}.h5".format(num)
                filepath = pathdir + '/' + filename
                f = h5py.File(filepath, "w")
                try:
                    rgb_file = f.create_dataset("rgb", (200, 88, 200), np.uint8)
                    seg_file = f.create_dataset("seg", (200, 88, 200), np.uint8)
                    lidar_file = f.create_dataset('lidar', (200, 200, 200), np.uint8)
                    startendpoint = f.create_dataset('startend', (1, 2), np.float32)
                    targets_file = f.create_dataset("targets", (200, 28), np.float32)
                    index_file = 0  # next frame slot within this cut

                    # Iterate every frame in the cut.
                    for frame in range(0, frames_per_cut):
                        # Read the data produced by the server this frame.
                        measurements, sensor_data = client.read_data()

                        # Extract and store this frame's sensors and targets.
                        sensors, targets_data = record_train_data(measurements, sensor_data)
                        rgb_file[:, :, index_file] = sensors['rgb']
                        seg_file[:, :, index_file] = sensors['seg']
                        lidar_file[:, :, index_file] = sensors['lidar']
                        targets_file[index_file, :] = targets_data
                        index_file += 1

                        # In synchronous mode the server pauses the simulation
                        # until we send back a control message.
                        if args.autopilot:
                            client.send_control(
                                steer=0,
                                throttle=0.8,
                                brake=0.0,
                                hand_brake=False,
                                reverse=False)
                        else:
                            # Echo back the in-game autopilot control, with a
                            # little noise added to the steer.
                            control = measurements.player_measurements.autopilot_control
                            control.steer += random.uniform(-0.05, 0.05)
                            client.send_control(control)
                finally:
                    # FIX: the original never closed the HDF5 file, leaking
                    # handles and risking unflushed data on error.
                    f.close()
Exemple #37
0
def run_carla_clients(args):
    """Repeatability check: drive two CARLA servers in lockstep.

    Connects to two servers, loads the same (seed-randomized) settings
    into both, starts both episodes at the same player start, then loops
    forever comparing per-frame data: agent counts, player position and
    autopilot control must match between the two simulations. Assertion
    failures are logged (not fatal). Optionally each server's camera
    image is saved to disk for visual comparison.

    Runs until interrupted (ctrl+c).

    Args:
        args: parsed namespace; reads ``host1``/``port1``,
            ``host2``/``port2`` and ``images_to_disk``.
    """
    # Per-server image path pattern: server index, then frame number.
    filename = '_images_repeatability/server{:d}/{:0>6d}.png'
    with make_carla_client(args.host1, args.port1) as client1:
        logging.info('1st client connected')
        with make_carla_client(args.host2, args.port2) as client2:
            logging.info('2nd client connected')

            # One settings object shared by both servers, so both episodes
            # use identical (randomized) seeds and scene configuration.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=50,
                NumberOfPedestrians=50,
                WeatherId=random.choice([1, 3, 7, 8, 14]))
            settings.randomize_seeds()

            if args.images_to_disk:
                camera = Camera('DefaultCamera')
                camera.set_image_size(800, 600)
                settings.add_sensor(camera)

            scene1 = client1.load_settings(settings)
            scene2 = client2.load_settings(settings)

            # Both scenes must expose the same start spots; pick one at
            # random and use it for both episodes.
            number_of_player_starts = len(scene1.player_start_spots)
            assert number_of_player_starts == len(scene2.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))
            logging.info(
                'start episode at %d/%d player start (run forever, press ctrl+c to cancel)',
                player_start,
                number_of_player_starts)

            client1.start_episode(player_start)
            client2.start_episode(player_start)

            frame = 0
            while True:
                frame += 1

                # Read both servers' data for this frame.
                meas1, sensor_data1 = client1.read_data()
                meas2, sensor_data2 = client2.read_data()

                player1 = meas1.player_measurements
                player2 = meas2.player_measurements

                # Keep only camera images among the sensor payloads.
                images1 = [x for x in sensor_data1.values() if isinstance(x, Image)]
                images2 = [x for x in sensor_data2.values() if isinstance(x, Image)]

                control1 = player1.autopilot_control
                control2 = player2.autopilot_control

                # Compare the two simulations; log (don't crash) on mismatch.
                try:
                    assert len(images1) == len(images2)
                    assert len(meas1.non_player_agents) == len(meas2.non_player_agents)
                    assert player1.transform.location.x == player2.transform.location.x
                    assert player1.transform.location.y == player2.transform.location.y
                    assert player1.transform.location.z == player2.transform.location.z
                    assert control1.steer == control2.steer
                    assert control1.throttle == control2.throttle
                    assert control1.brake == control2.brake
                    assert control1.hand_brake == control2.hand_brake
                    assert control1.reverse == control2.reverse
                except AssertionError:
                    logging.exception('assertion failed')

                if args.images_to_disk:
                    assert len(images1) == 1
                    images1[0].save_to_disk(filename.format(1, frame))
                    images2[0].save_to_disk(filename.format(2, frame))

                # Send each server its own autopilot control so both
                # synchronous simulations advance in lockstep.
                client1.send_control(control1)
                client2.send_control(control2)