Example #1
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            camera1.set_position(30, 0, 130)
            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        camera2.set_position(30, 0, 130)
        settings.add_sensor(camera2)

        # Set up the start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)
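
A minimal sketch of the configuration this _reset method assumes; the key names come from the lookups above, while the concrete values are purely illustrative.

# Hypothetical config consumed by _reset(); values are made up.
EXAMPLE_CONFIG = {
    "scenarios": [{
        "city": "Town01",
        "weather_distribution": [1, 3, 7, 8, 14],
        "num_vehicles": 20,
        "num_pedestrians": 40,
        "start_pos_id": 36,
        "end_pos_id": 40,
    }],
    "use_depth_camera": False,
    "render_x_res": 800,
    "render_y_res": 600,
}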
Example #2
def get_default_carla_settings(args):
    settings = CarlaSettings(
        SynchronousMode=args.synchronous,
        SendNonPlayerAgentsInfo=False,
        NumberOfVehicles=20,
        NumberOfPedestrians=40,
        WeatherId=1)
    settings.add_sensor(Camera('Camera1'))
    return str(settings)
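
A sketch of how the returned string might be used; load_settings accepts either a CarlaSettings object or its serialized .ini form, as the other examples note.

def start_default_episode(client, args):
    # Hypothetical usage of the serialized settings string.
    scene = client.load_settings(get_default_carla_settings(args))
    client.start_episode(0)  # start at player spawn index 0
    return scene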
Example #3
    def build_experiments(self):
        """
            Creates the whole set of experiment objects.
            The experiments created depend on the selected Town.
        """

        # We check the town and, based on that, define the town-related parameters.
        # The size of the vector matches the number of tasks; each task in turn
        # contains multiple poses (start and end positions).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather

                )
                # Add all the cameras that were set up for these experiments
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
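
A sketch of how a benchmark runner might consume the returned vector; the conditions/poses attribute access is an assumption about the Experiment class, and the driving logic is only indicated.

def run_experiments(client, experiments_vector):
    # Hypothetical consumer: load each experiment's conditions and run its poses.
    for experiment in experiments_vector:
        client.load_settings(experiment.conditions)   # assumed attribute
        for start, end in experiment.poses:           # assumed attribute
            client.start_episode(start)
            # ... drive from `start` towards `end` and record metrics ...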
Example #4
    def build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 15]
            pedestrians_tasks = [0, 0, 0, 50]

        experiments_vector = []

        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather
                )
                # Add all the cameras that were set up for these experiments

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #5
def make_carla_settings(args):
    """Make a CarlaSettings object with the settings we need."""
    settings = CarlaSettings()
    settings.set(
        SynchronousMode=True,
        SendNonPlayerAgentsInfo=False,
        NumberOfVehicles=30,
        NumberOfPedestrians=0,
        WeatherId=1,
        QualityLevel='Epic')
    settings.randomize_seeds()
    camera0 = sensor.Camera('CameraRGB')
    camera0.set_image_size(CAM_WINDOW_WIDTH, CAM_WINDOW_HEIGHT)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 0.0, 0.0)
    settings.add_sensor(camera0)
    camera1 = sensor.Camera('CameraDepth', PostProcessing='Depth')
    camera1.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    camera1.set_position(2.0, 0.0, 1.4)
    camera1.set_rotation(0.0, 0.0, 0.0)
    settings.add_sensor(camera1)
    camera2 = sensor.Camera('CameraSemSeg', PostProcessing='SemanticSegmentation')
    camera2.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    camera2.set_position(2.0, 0.0, 1.4)
    camera2.set_rotation(0.0, 0.0, 0.0)
    settings.add_sensor(camera2)
    lidar = sensor.Lidar('Lidar32')
    lidar.set_position(0, 0, 2.5)
    lidar.set_rotation(0, 0, 0)
    lidar.set(
        Channels=0,
        Range=50,
        PointsPerSecond=100000,
        RotationFrequency=10,
        UpperFovLimit=0,
        LowerFovLimit=0)
    settings.add_sensor(lidar)
    return settings
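
A sketch of how these settings might be used, assuming a client created with make_carla_client as in the other examples; the sensor_data keys match the names given to each sensor above.

def run_one_episode(client, args, frames=100):
    # Hypothetical episode loop built on the settings above.
    client.load_settings(make_carla_settings(args))
    client.start_episode(0)
    for _ in range(frames):
        measurements, sensor_data = client.read_data()
        rgb = sensor_data['CameraRGB']      # main RGB image
        depth = sensor_data['CameraDepth']  # depth-encoded image
        lidar = sensor_data['Lidar32']      # lidar measurement
        client.send_control(measurements.player_measurements.autopilot_control)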
Example #6
def make_carla_settings():
    """Make a CarlaSettings object with the settings we need."""

    settings = CarlaSettings()
    settings.set(
        SendNonPlayerAgentsInfo=True,
        SynchronousMode=True,
        NumberOfVehicles=30,
        NumberOfPedestrians=50,
        WeatherId=1)

    settings.set(DisableTwoWheeledVehicles=True)

    settings.randomize_seeds() # IMPORTANT TO RANDOMIZE THE SEEDS EVERY TIME
    camera0 = sensor.Camera('CentralSemanticSeg', PostProcessing='SemanticSegmentation')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 0, 0)

    settings.add_sensor(camera0)
    camera0 = sensor.Camera('CentralRGB')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 0, 0)

    settings.add_sensor(camera0)
    camera0 = sensor.Camera('CentralDepth', PostProcessing='Depth')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 0, 0)

    settings.add_sensor(camera0)

    camera0 = sensor.Camera('LeftSemanticSeg', PostProcessing='SemanticSegmentation')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, -30.0, 0)

    settings.add_sensor(camera0)
    camera0 = sensor.Camera('LeftRGB')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, -30.0, 0)

    settings.add_sensor(camera0)
    camera0 = sensor.Camera('LeftDepth', PostProcessing='Depth')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, -30.0, 0)

    settings.add_sensor(camera0)

    camera0 = sensor.Camera('RightSemanticSeg', PostProcessing='SemanticSegmentation')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 30.0, 0)

    settings.add_sensor(camera0)
    camera0 = sensor.Camera('RightRGB')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 30.0, 0)

    settings.add_sensor(camera0)
    camera0 = sensor.Camera('RightDepth', PostProcessing='Depth')
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set(FOV=FOV)
    camera0.set_position(2.0, 0.0, 1.4)
    camera0.set_rotation(-15.0, 30.0, 0)

    settings.add_sensor(camera0)

    lidar = sensor.Lidar('Lidar32')
    lidar.set_position(0, 0, 2.5)
    lidar.set_rotation(0, 0, 0)
    lidar.set(
        Channels=32,
        Range=50,
        PointsPerSecond=100000,
        RotationFrequency=10,
        UpperFovLimit=10,
        LowerFovLimit=-30)
    settings.add_sensor(lidar)
    return settings
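
The nine cameras above differ only in their name, post-processing mode, and yaw; a more compact way to build the same rig, assuming the same WINDOW_WIDTH, WINDOW_HEIGHT and FOV constants, might look like this.

def add_camera_rig(settings):
    # Sketch: build the 3x3 camera rig in a loop instead of nine near-identical blocks.
    for side, yaw in (('Central', 0.0), ('Left', -30.0), ('Right', 30.0)):
        for suffix, post in (('SemanticSeg', 'SemanticSegmentation'),
                             ('RGB', None), ('Depth', 'Depth')):
            if post is None:
                cam = sensor.Camera(side + suffix)
            else:
                cam = sensor.Camera(side + suffix, PostProcessing=post)
            cam.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
            cam.set(FOV=FOV)
            cam.set_position(2.0, 0.0, 1.4)
            cam.set_rotation(-15.0, yaw, 0)
            settings.add_sensor(cam)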
Example #7
def run(host, port, dagger):
    n_episodes = args.episodes
    frames_per_episode = args.frames
    image_dims = (480, 240)

    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Start a new episode.
        settings = CarlaSettings()
        settings.set(SynchronousMode=False,
                     SendNonPlayerAgentsInfo=True,
                     NumberOfVehicles=25,
                     NumberOfPedestrians=25,
                     WeatherId=np.random.choice(14))
        settings.randomize_seeds()

        # add 1st camera
        camera0 = Camera('CameraRGB')
        camera0.set_image_size(image_dims[0], image_dims[1])
        camera0.set_position(175, 0, 140)
        camera0.set_rotation(-12, 0, 0)
        settings.add_sensor(camera0)

        # Choose one player start at random.
        scene = client.load_settings(settings)
        number_of_player_starts = len(scene.player_start_spots)

        expert_controls = []
        for episode in range(n_episodes):
            print('Starting new episode:', episode)
            client.start_episode(np.random.randint(number_of_player_starts -
                                                   1))

            # Iterate every frame in the episode.
            for frame in range(frames_per_episode):
                # Read the data produced by the server in this frame.
                measurements, sensor_data = client.read_data()

                image = sensor_data['CameraRGB']

                if dagger:
                    image.save_to_disk(args.dir + '/image_{:0>5d}.jpg'.format(
                        episode * frames_per_episode + frame))
                    control = measurements.player_measurements.autopilot_control
                    expert_controls.append(
                        (control.steer + 30 / 70, control.throttle,
                         control.brake,
                         measurements.player_measurements.forward_speed))

                image = np.expand_dims(np.array(image.data), axis=0)
                image, _ = network_handler.flip_data(X=image)
                image, _ = network_handler.normalize_data(X=image)
                predicted_controls = network_handler.predict_controls(
                    argparser.parse_args().network, image)[0]
                _, predicted_controls = network_handler.unnormalize_data(
                    y=predicted_controls)
                _, predicted_controls = network_handler.flip_data(
                    y=predicted_controls)
                steer, throttle = predicted_controls[0], predicted_controls[1]

                print('steer: ', steer, 'throttle: ', throttle)

                if measurements.player_measurements.forward_speed > 20:
                    throttle = 0

                # send control to simulator
                client.send_control(steer=steer,
                                    throttle=throttle,
                                    brake=0.0,
                                    hand_brake=False,
                                    reverse=False)

                if dagger:
                    np.save(args.dir + '/controls.npy', expert_controls)
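
A sketch of loading the DAgger labels written above, assuming the same args.dir and the 4-tuple layout appended to expert_controls.

import numpy as np

def load_expert_controls(data_dir):
    # Hypothetical loader for the controls saved with np.save above.
    controls = np.load(data_dir + '/controls.npy')  # shape (N, 4)
    steer, throttle, brake, speed = controls.T      # one column per field
    return steer, throttle, brake, speed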
Example #8
def run_carla_clients(args):
    filename = '_images_repeatability/server{:d}/{:0>6d}.png'
    with make_carla_client(args.host1, args.port1) as client1:
        logging.info('1st client connected')
        with make_carla_client(args.host2, args.port2) as client2:
            logging.info('2nd client connected')

            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=50,
                NumberOfPedestrians=50,
                WeatherId=random.choice([1, 3, 7, 8, 14]))
            settings.randomize_seeds()

            if args.images_to_disk:
                camera = Camera('DefaultCamera')
                camera.set_image_size(800, 600)
                settings.add_sensor(camera)

            scene1 = client1.load_settings(settings)
            scene2 = client2.load_settings(settings)

            number_of_player_starts = len(scene1.player_start_spots)
            assert number_of_player_starts == len(scene2.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))
            logging.info(
                'start episode at %d/%d player start (run forever, press ctrl+c to cancel)',
                player_start,
                number_of_player_starts)

            client1.start_episode(player_start)
            client2.start_episode(player_start)

            frame = 0
            while True:
                frame += 1

                meas1, sensor_data1 = client1.read_data()
                meas2, sensor_data2 = client2.read_data()

                player1 = meas1.player_measurements
                player2 = meas2.player_measurements

                images1 = [x for x in sensor_data1.values() if isinstance(x, Image)]
                images2 = [x for x in sensor_data2.values() if isinstance(x, Image)]

                control1 = player1.autopilot_control
                control2 = player2.autopilot_control

                try:
                    assert len(images1) == len(images2)
                    assert len(meas1.non_player_agents) == len(meas2.non_player_agents)
                    assert player1.transform.location.x == player2.transform.location.x
                    assert player1.transform.location.y == player2.transform.location.y
                    assert player1.transform.location.z == player2.transform.location.z
                    assert control1.steer == control2.steer
                    assert control1.throttle == control2.throttle
                    assert control1.brake == control2.brake
                    assert control1.hand_brake == control2.hand_brake
                    assert control1.reverse == control2.reverse
                except AssertionError:
                    logging.exception('assertion failed')

                if args.images_to_disk:
                    assert len(images1) == 1
                    images1[0].save_to_disk(filename.format(1, frame))
                    images2[0].save_to_disk(filename.format(2, frame))

                client1.send_control(control1)
                client2.send_control(control2)
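
The exact float comparisons above can be brittle if the two servers accumulate tiny numerical differences; a tolerance-based check is one possible alternative (a sketch, with the tolerance value chosen arbitrarily).

import math

def transforms_match(player1, player2, tol=1e-3):
    # Compare player locations coordinate by coordinate within an absolute tolerance.
    loc1, loc2 = player1.transform.location, player2.transform.location
    return (math.isclose(loc1.x, loc2.x, abs_tol=tol) and
            math.isclose(loc1.y, loc2.y, abs_tol=tol) and
            math.isclose(loc1.z, loc2.z, abs_tol=tol))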
Example #9
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3000 frames.
    number_of_frames = 3000
    frame_step = 100  # Save one image every 100 frames
    output_folder = '_out'
    image_size = [800, 600]
    camera_local_pos = [30, 0, 130]  # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 70

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=False,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=random.choice([1, 3, 7, 8, 14]))
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)

        client.load_settings(settings)

        # Start at location index id '0'
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:
                # Start measuring the transformation time.
                timer = StopWatch()

                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                # 2D to (camera-)local 3D.
                # We use image_RGB to colorize each 3D point; this is optional.
                # "max_depth" keeps only the points close to the camera,
                # where 1.0 corresponds to the farthest points (the sky).
                point_cloud = depth_to_local_point_cloud(
                    sensor_data['CameraDepth'], image_RGB, max_depth=far)

                # (Camera) local 3d to world 3d.
                # Get the transform from the player protobuf transformation.
                world_transform = Transform(
                    measurements.player_measurements.transform)

                # Compute the final transformation matrix.
                car_to_world_transform = world_transform * camera_to_car_transform

                # Car to World transformation given the 3D points and the
                # transformation matrix.
                point_cloud.apply_transform(car_to_world_transform)

                # Stop measuring the transformation time.
                timer.stop()

                # Save PLY to disk
                # This generates the PLY string with the 3D points and the RGB colors
                # for each row of the file.
                point_cloud.save_to_disk(
                    os.path.join(output_folder, '{:0>5}.ply'.format(frame)))

                print_message(timer.milliseconds(), len(point_cloud), frame)

            client.send_control(
                measurements.player_measurements.autopilot_control)
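
A sketch of how this function might be invoked, assuming argparse wiring similar to the other examples; 'far' is passed through as max_depth, a normalized value in [0, 1] where 1.0 keeps even the sky.

import argparse

def main():
    # Hypothetical entry point for the point-cloud exporter above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='localhost')
    parser.add_argument('--port', type=int, default=2000)
    parser.add_argument('--far', type=float, default=0.2,
                        help='max normalized depth kept in the point cloud')
    args = parser.parse_args()
    run_carla_client(args.host, args.port, args.far)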
Example #10
def run_carla_client(args):
    # Here we will run 5 episodes, each split into 40 cuts of 200 frames.
    number_of_episodes = 5
    cut_per_episode = 40
    frames_per_cut = 200


    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        for episode in range(0, number_of_episodes):
            print("input any key to continue...")
            start = input()
            # each episode dir store a set of traindata.  if dir not existed, then make it
            pathdir = '/home/kadn/dataTrain/episode_{:0>3}'.format(episode)
            mkdir(pathdir)

            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=20,
                NumberOfPedestrians=40,
                # WeatherId=random.choice([1, 3, 7, 8, 14]),
                WeatherId=1,
                QualityLevel=args.quality_level)
            settings.randomize_seeds()
            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(88, 200)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            camera1 = Camera('CameraDepth', PostProcessing='Depth')
            camera1.set_image_size(200, 88)
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

            camera2 = Camera('CameraSemSeg', PostProcessing='SemanticSegmentation')
            camera2.set_image_size(88, 200)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera2)
            # if args.lidar:
            lidar = Lidar('Lidar32')
            lidar.set_position(0, 0, 2.50)
            lidar.set_rotation(0, 0, 0)
            lidar.set(
                Channels=0,
                Range=30,
                PointsPerSecond=200000,
                RotationFrequency=10,
                UpperFovLimit=0,
                LowerFovLimit=0)
            settings.add_sensor(lidar)

            # else:
            #
            #     # Alternatively, we can load these settings from a file.
            #     with open(args.settings_filepath, 'r') as fp:
            #         settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))
            player_start = 1

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            # Start a new episode.
            client.start_episode(player_start)

            for cut in range(0, cut_per_episode):
                num = fileCounter(pathdir)
                filename = "data_{:0>6}.h5".format(num)
                filepath = pathdir + '/' + filename
                f = h5py.File(filepath, "w")
                rgb_file = f.create_dataset("rgb", (200, 88, 200), np.uint8)
                seg_file = f.create_dataset("seg", (200, 88, 200), np.uint8)
                lidar_file = f.create_dataset('lidar',(200,200,200),np.uint8)
                startendpoint = f.create_dataset('startend',(1,2),np.float32)
                targets_file = f.create_dataset("targets", (200, 28), np.float32)
                index_file = 0

                # Iterate every frame in the episode.
                for frame in range(0, frames_per_cut):
                    # Read the data produced by the server this frame.
                    measurements, sensor_data = client.read_data()

                    # get data and store
                    sensors, targets_data = record_train_data(measurements,sensor_data)
                    rgb_file[:,:,index_file] = sensors['rgb']
                    seg_file[:,:,index_file] = sensors['seg']
                    lidar_file[:,:,index_file] = sensors['lidar']
                    targets_file[index_file,:] = targets_data
                    index_file += 1

                    # We can access the encoded data of a given image as numpy
                    # array using its "data" property. For instance, to get the
                    # depth value (normalized) at pixel X, Y
                    #
                    #     depth_array = sensor_data['CameraDepth'].data
                    #     value_at_pixel = depth_array[Y, X]
                    #

                    # Now we have to send the instructions to control the vehicle.
                    # If we are in synchronous mode the server will pause the
                    # simulation until we send this control.

                    if args.autopilot:

                        client.send_control(
                            steer=0,
                            throttle=0.8,
                            brake=0.0,
                            hand_brake=False,
                            reverse=False)

                    else:

                        # Together with the measurements, the server has sent the
                        # control that the in-game autopilot would do this frame. We
                        # can enable autopilot by sending back this control to the
                        # server. We can modify it if wanted, here for instance we
                        # will add some noise to the steer.

                        control = measurements.player_measurements.autopilot_control
                        control.steer += random.uniform(-0.05, 0.05)
                        client.send_control(control)
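
A sketch of reading one of the HDF5 cuts written above back into memory; the dataset names and shapes follow the create_dataset calls, with frames stacked along the last axis for rgb/seg/lidar.

import h5py

def load_cut(filepath):
    # Hypothetical reader for a single data_XXXXXX.h5 cut produced above.
    with h5py.File(filepath, 'r') as f:
        rgb = f['rgb'][:]          # (200, 88, 200): 200 frames on the last axis
        seg = f['seg'][:]
        lidar = f['lidar'][:]
        targets = f['targets'][:]  # (200, 28): one row of labels per frame
    return rgb, seg, lidar, targets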
Example #11
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3000 frames.
    number_of_frames = 3000
    frame_step = 100  # Save one image every 100 frames
    output_folder = '_out'
    image_size = [800, 600]
    camera_local_pos = [0.3, 0.0, 1.3] # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 70

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=20,
            NumberOfPedestrians=40,
            WeatherId=random.choice([1, 3, 7, 8, 14]))
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)

        client.load_settings(settings)

        # Start at location index id '0'
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:
                # Start measuring the transformation time.
                timer = StopWatch()

                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                # 2D to (camera-)local 3D.
                # We use image_RGB to colorize each 3D point; this is optional.
                # "max_depth" keeps only the points close to the camera,
                # where 1.0 corresponds to the farthest points (the sky).
                point_cloud = depth_to_local_point_cloud(
                    sensor_data['CameraDepth'],
                    image_RGB,
                    max_depth=far
                )

                # (Camera) local 3d to world 3d.
                # Get the transform from the player protobuf transformation.
                world_transform = Transform(
                    measurements.player_measurements.transform
                )

                # Compute the final transformation matrix.
                car_to_world_transform = world_transform * camera_to_car_transform

                # Car to World transformation given the 3D points and the
                # transformation matrix.
                point_cloud.apply_transform(car_to_world_transform)

                # Stop measuring the transformation time.
                timer.stop()

                # Save PLY to disk
                # This generates the PLY string with the 3D points and the RGB colors
                # for each row of the file.
                point_cloud.save_to_disk(os.path.join(
                    output_folder, '{:0>5}.ply'.format(frame))
                )

                print_message(timer.milliseconds(), len(point_cloud), frame)

            client.send_control(
                measurements.player_measurements.autopilot_control
            )
Example #12
def run_carla_client(args):
    with make_connection(CarlaClient, args.host, args.port, timeout=15) as client:
        logging.info('CarlaClient connected')
        filename = '_out/test_episode_{:0>4d}/{:s}/{:0>6d}'
        frames_per_episode = 300
        episode = 0
        while True:
            episode += 1
            settings = CarlaSettings()
            settings.set(SendNonPlayerAgentsInfo=True, SynchronousMode=args.synchronous)
            settings.randomize_seeds()
            camera = sensor.Camera('DefaultCamera')
            camera.set_image_size(300, 200) # Do not change this, hard-coded in test.
            settings.add_sensor(camera)
            lidar = sensor.Lidar('DefaultLidar')
            settings.add_sensor(lidar)

            logging.debug('sending CarlaSettings:\n%s', settings)
            logging.info('new episode requested')

            scene = client.load_settings(settings)

            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))
            logging.info(
                'start episode at %d/%d player start (%d frames)',
                player_start,
                number_of_player_starts,
                frames_per_episode)

            client.start_episode(player_start)

            use_autopilot_control = (random.random() < 0.5)
            reverse = (random.random() < 0.2)

            for frame in range(0, frames_per_episode):
                logging.debug('reading measurements...')
                measurements, sensor_data = client.read_data()
                images = [x for x in sensor_data.values() if isinstance(x, sensor.Image)]

                logging.debug('received data of %d agents', len(measurements.non_player_agents))
                if len(images) > 0:
                    assert (images[0].width, images[0].height) == (camera.ImageSizeX, camera.ImageSizeY)

                if args.images_to_disk:
                    for name, data in sensor_data.items():
                        data.save_to_disk(filename.format(episode, name, frame))

                logging.debug('sending control...')
                control = measurements.player_measurements.autopilot_control
                if not use_autopilot_control:
                    control.steer = random.uniform(-1.0, 1.0)
                    control.throttle = 0.3
                    control.hand_brake = False
                    control.reverse = reverse
                client.send_control(
                    steer=control.steer,
                    throttle=control.throttle,
                    brake=control.brake,
                    hand_brake=control.hand_brake,
                    reverse=control.reverse)
Example #13
def make_carla_settings():
    """Make a CarlaSettings object with the settings we need."""
    settings = CarlaSettings()
    settings.set(SynchronousMode=True,
                 SendNonPlayerAgentsInfo=True,
                 NumberOfVehicles=120,
                 NumberOfPedestrians=140,
                 WeatherId=weather_choice,
                 SeedVehicles=random.randint(0, 123456),
                 SeedPedestrians=random.randint(0, 123456))

    # settings.randomize_seeds()
    camera0 = sensor.Camera('CameraFront')
    camera0.set(FOV=100)
    camera0.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    camera0.set_position(1.35, 0, 1.40)
    camera0.set_rotation(-15.0, 0.0, 0.0)
    settings.add_sensor(camera0)
    camera1 = sensor.Camera('CameraLeft')
    camera1.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    camera1.set_position(1.30, -0.50, 1.40)
    camera1.set_rotation(-20.0, 300.0, 0.0)
    settings.add_sensor(camera1)
    camera2 = sensor.Camera('CameraRight')
    camera2.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    camera2.set_position(1.30, 0.50, 1.40)
    camera2.set_rotation(-20.0, 60.0, 0.0)
    settings.add_sensor(camera2)

    # camera3 = sensor.Camera('CameraFrontDepth', PostProcessing='Depth')
    # camera3.set(FOV=100)
    # camera3.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    # camera3.set_position(135, 0, 140)
    # camera3.set_rotation(-15.0, 0.0, 0.0)
    # settings.add_sensor(camera3)
    # camera4 = sensor.Camera('CameraLeftDepth', PostProcessing='Depth')
    # camera4.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    # camera4.set_position(130, -50, 140)
    # camera4.set_rotation(-20.0, 300.0, 0.0)
    # settings.add_sensor(camera4)
    # camera5 = sensor.Camera('CameraRightDepth', PostProcessing='Depth')
    # camera5.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    # camera5.set_position(130, 50, 140)
    # camera5.set_rotation(-20.0, 60.0, 0.0)
    # settings.add_sensor(camera5)

    # camera6 = sensor.Camera('CameraFrontSeg', PostProcessing='SemanticSegmentation')
    # camera6.set(FOV=100)
    # camera6.set_image_size(WINDOW_WIDTH, WINDOW_HEIGHT)
    # camera6.set_position(135, 0, 140)
    # camera6.set_rotation(-15.0, 0.0, 0.0)
    # settings.add_sensor(camera6)
    # camera7 = sensor.Camera('CameraLeftSeg', PostProcessing='SemanticSegmentation')
    # camera7.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    # camera7.set_position(130, -50, 140)
    # camera7.set_rotation(-20.0, 300.0, 0.0)
    # settings.add_sensor(camera7)
    # camera8 = sensor.Camera('CameraRightSeg', PostProcessing='SemanticSegmentation')
    # camera8.set_image_size(MINI_WINDOW_WIDTH, MINI_WINDOW_HEIGHT)
    # camera8.set_position(130, 50, 140)
    # camera8.set_rotation(-20.0, 60.0, 0.0)
    # settings.add_sensor(camera8)

    return settings
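
Here the seeds are drawn with random.randint on every call, so two runs will differ; below is a sketch of a fixed-seed variant (the constants are illustrative) that keeps a run repeatable.

def make_repeatable_settings(weather_id=1, seed=42):
    # Sketch: pin the seeds and weather instead of sampling them on each call.
    settings = CarlaSettings()
    settings.set(SynchronousMode=True,
                 SendNonPlayerAgentsInfo=True,
                 NumberOfVehicles=120,
                 NumberOfPedestrians=140,
                 WeatherId=weather_id,
                 SeedVehicles=seed,
                 SeedPedestrians=seed)
    return settings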
Example #14
def run_carla_client(args):
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 3
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
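
print_measurements is referenced but not defined in this snippet; a minimal stand-in, assuming the player measurement fields used elsewhere in these examples, might look like this.

def print_measurements(measurements):
    # Hypothetical helper: summarize a few per-frame player measurements.
    player = measurements.player_measurements
    print('pos=({:.1f}, {:.1f}) speed={:.1f} agents={}'.format(
        player.transform.location.x,
        player.transform.location.y,
        player.forward_speed,
        len(measurements.non_player_agents)))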
Example #15
    def build_experiments(self):
        """
            Creates the whole set of experiment objects.
            The experiments created depend on the selected Town.
        """

        # We check the town and, based on that, define the town-related parameters.
        # The size of the vector matches the number of tasks; each task in turn
        # contains multiple poses (start and end positions).
        if self._city_name == 'Town01':
            poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        else:
            right_curves =  [[[1,56],[65,69],[78,51],[44,61],[40,17],[71,16],[74,38],[46,12],
                              [19,18],[26,74],[37,76],[11,44],[20,6],[10,22],[28,2],[5,15],
                              [14,33],[34,8]]]      
            left_curves =  [[[57,82],[72,43],[52,79],[70,66],[43,14],[11,47],[79,32],[37,75],
                             [75,16],[26,73],[39,5],[2,37],[34,13],[6,35],[10,19],[23,6],
                             [5,30],[16,2]]] 
            special_test = [[[19, 66], [79, 14],[42, 13], [31, 71], 
                             [54, 30], [10, 61], [66, 3], [27, 12],
                             [2, 29], [16, 14],[70, 73], [46, 67],
                             [51, 81], [56, 65], [43, 54]]]
            corl_task2 = [[[19, 66], [79, 14], [19, 57], [23, 1],
                    [53, 76], [42, 13], [31, 71], [33, 5],
                    [54, 30], [10, 61], [66, 3], [27, 12],
                    [79, 19], [2, 29], [16, 14], [5, 57],
                    [70, 73], [46, 67], [57, 50], [61, 49], [21, 12],
                    [51, 81], [77, 68], [56, 65], [43, 54]]]

            poses_tasks = corl_task2
            vehicles_tasks = [0]*len(poses_tasks[0])
            pedestrians_tasks = [0]*len(poses_tasks[0])

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=90)
        camera.set_image_size(800, 600)
        camera.set_position(1.44, 0.0, 1.2)
        camera.set_rotation(0, 0, 0)

        # Based on the parameters, creates a vector with experiment objects.
        experiments_vector = []
        for weather in self.weathers:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                conditions = CarlaSettings()
                conditions.set(
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=vehicles,
                    NumberOfPedestrians=pedestrians,
                    WeatherId=weather

                )
                # Add all the cameras that were set up for these experiments
                conditions.add_sensor(camera)
                experiment = Experiment()
                experiment.set(
                    Conditions=conditions,
                    Poses=poses,
                    Task=iteration,
                    Repetitions=1
                )
                experiments_vector.append(experiment)

        return experiments_vector
Example #16
    def _reset(self):
        self.num_steps = 0
        self.total_reward = 0
        self.prev_measurement = None
        self.prev_image = None
        self.episode_id = datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
        self.measurements_file = None

        # Create a CarlaSettings object. This object is a wrapper around
        # the CarlaSettings.ini file. Here we set the configuration we
        # want for the new episode.
        settings = CarlaSettings()
        self.scenario = random.choice(self.config["scenarios"])
        assert self.scenario["city"] == self.city, (self.scenario, self.city)
        self.weather = random.choice(self.scenario["weather_distribution"])
        settings.set(
            SynchronousMode=True,
            # ServerTimeOut=10000, # CarlaSettings: no key named 'ServerTimeOut'
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self.scenario["num_vehicles"],
            NumberOfPedestrians=self.scenario["num_pedestrians"],
            WeatherId=self.weather)
        settings.randomize_seeds()

        if self.config["use_depth_camera"]:
            camera1 = Camera("CameraDepth", PostProcessing="Depth")
            camera1.set_image_size(self.config["render_x_res"],
                                   self.config["render_y_res"])
            # camera1.set_position(30, 0, 130)
            camera1.set_position(0.5, 0.0, 1.6)
            # camera1.set_rotation(0.0, 0.0, 0.0)

            settings.add_sensor(camera1)

        camera2 = Camera("CameraRGB")
        camera2.set_image_size(self.config["render_x_res"],
                               self.config["render_y_res"])
        # camera2.set_position(30, 0, 130)
        # camera2.set_position(0.3, 0.0, 1.3)
        # camera2.set_position(2.0, 0.0, 1.4)
        # camera2.set_rotation(0.0, 0.0, 0.0)
        camera2.set_position(0.5, 0.0, 1.6)

        settings.add_sensor(camera2)

        # Set up the start and end positions
        scene = self.client.load_settings(settings)
        positions = scene.player_start_spots
        self.start_pos = positions[self.scenario["start_pos_id"]]
        self.end_pos = positions[self.scenario["end_pos_id"]]
        self.start_coord = [
            self.start_pos.location.x // 100, self.start_pos.location.y // 100
        ]
        self.end_coord = [
            self.end_pos.location.x // 100, self.end_pos.location.y // 100
        ]
        print("Start pos {} ({}), end {} ({})".format(
            self.scenario["start_pos_id"], self.start_coord,
            self.scenario["end_pos_id"], self.end_coord))

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print("Starting new episode...")
        self.client.start_episode(self.scenario["start_pos_id"])

        # Process observations: self._read_observation() returns image and py_measurements.
        image, py_measurements = self._read_observation()
        self.prev_measurement = py_measurements
        return self.encode_obs(self.preprocess_image(image), py_measurements)
Example #17
    def build_experiments(self):
        """
        Creates the whole set of experiment objects.
        The experiments created depend on the selected Town.
        """

        # We set the camera
        # This single RGB camera is used on every experiment

        camera = Camera('CameraRGB')
        camera.set(FOV=100)
        camera.set_image_size(800, 600)
        #camera.set_image_size(1000, 1000)
        camera.set_position(2.0, 0.0, 1.4)
        camera.set_rotation(-15.0, 0, 0)
        if self._task == 'go-straight':
            task_select = 0
        elif self._task == 'right_turn':
            task_select = 1
        elif self._task == 'left_turn':
            task_select = 2
        elif self._task == 'intersection-straight':
            task_select = 3
        elif self._task == 'intersection-right':
            task_select = 4
        elif self._task == 'intersection-left':
            task_select = 5
        else:
            raise ValueError('Unknown task: {}'.format(self._task))

        intersection_int_1 = int(self._intersection.split("_")[0])
        intersection_int_2 = int(self._intersection.split("_")[1])
        intersection_input = [intersection_int_1, intersection_int_2]

        if self._city_name == 'Town01':
            poses_tasks = self._poses_town01()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name == 'Town02':
            poses_tasks = self._poses_town02()
            vehicles_tasks = [0, 0, 0, 20]
            pedestrians_tasks = [0, 0, 0, 50]
        elif self._city_name[:-1] == 'Town01_nemesis':
            poses_tasks = [self._poses_town01_nemesis()[task_select]]
            vehicles_tasks = [0, 0, 0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0, 0, 0]
        elif self._city_name[:-1] == 'Town02_nemesis':
            poses_tasks = [self._poses_town02_nemesis()[task_select]]
            vehicles_tasks = [0, 0, 0, 0, 0, 0]
            pedestrians_tasks = [0, 0, 0, 0, 0, 0]

        experiments_vector = []
        """repeating experiment"""
        num_iters = self._iters
        weathers_iters = [self._weather] * num_iters

        # 0 - Default
        # 1 - ClearNoon
        # 2 - CloudyNoon
        # 3 - WetNoon
        # 4 - WetCloudyNoon
        # 5 - MidRainyNoon
        # 6 - HardRainNoon
        # 7 - SoftRainNoon
        # 8 - ClearSunset
        # 9 - CloudySunset
        # 10 - WetSunset
        # 11 - WetCloudySunset
        # 12 - MidRainSunset
        # 13 - HardRainSunset
        # 14 - SoftRainSunset

        for weather in weathers_iters:

            for iteration in range(len(poses_tasks)):
                poses = poses_tasks[iteration]
                vehicles = vehicles_tasks[iteration]
                pedestrians = pedestrians_tasks[iteration]

                # Optionally randomize the scenario here, e.g.:
                #   weather = int(random.uniform(0, 14))
                #   vehicles = int(random.uniform(0, 1000))
                #   pedestrians = int(random.uniform(0, 1000))
                # TODO: seed_vehicle_input, seed_pedestrian_input

                conditions = CarlaSettings()
                conditions.set(SendNonPlayerAgentsInfo=True,
                               NumberOfVehicles=vehicles,
                               NumberOfPedestrians=pedestrians,
                               WeatherId=weather)
                # Add all the cameras that were set up for these experiments

                conditions.add_sensor(camera)

                experiment = Experiment()
                experiment.set(Conditions=conditions,
                               Poses=poses,
                               Task=iteration,
                               Repetitions=1)
                experiments_vector.append(experiment)

        return experiments_vector
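
The weather IDs listed in the comment block above can also be kept as a lookup table, which makes logged experiment conditions easier to read (an optional sketch).

# Optional: map the WeatherId integers from the comment above to names.
WEATHER_NAMES = {
    0: 'Default', 1: 'ClearNoon', 2: 'CloudyNoon', 3: 'WetNoon',
    4: 'WetCloudyNoon', 5: 'MidRainyNoon', 6: 'HardRainNoon',
    7: 'SoftRainNoon', 8: 'ClearSunset', 9: 'CloudySunset',
    10: 'WetSunset', 11: 'WetCloudySunset', 12: 'MidRainSunset',
    13: 'HardRainSunset', 14: 'SoftRainSunset',
}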
Example #18
def run_carla_client(host, port, autopilot_on, save_images_to_disk,
                     image_filename_format, settings_filepath):
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 3
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(SynchronousMode=True,
                             SendNonPlayerAgentsInfo=True,
                             NumberOfVehicles=20,
                             NumberOfPedestrians=40,
                             WeatherId=random.choice([1, 3, 7, 8, 14]))
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in centimeters.
                camera0.set_position(30, 0, 130)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(30, 0, 130)
                settings.add_sensor(camera1)

            else:

                # Alternatively, we can load these settings from a file.
                with open(settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if save_images_to_disk:
                    for name, image in sensor_data.items():
                        image.save_to_disk(
                            image_filename_format.format(episode, name, frame))

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #
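                # (In this CARLA version the normalized depth is reportedly
                # measured against a far plane of roughly 1000 m, so metric
                # depth would be about 1000.0 * value_at_pixel; treat that
                # factor as an assumption and check image_converter.)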

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not autopilot_on:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
Exemplo n.º 19
0
def run_carla_client(args):

    # Here we will run 10 episodes with up to 10000 frames each.
    number_of_episodes = 10
    frames_per_episode = 10000

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.

    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for i_episode in range(0, number_of_episodes):
            # Start a new episode.

            if not args.autopilot:
                lane_detection = Popen(
                    ["/home/minghanz/lane-detection/newcheck/SFMotor/lane detection/lane-detection"],
                    cwd="/home/minghanz/lane-detection/newcheck/SFMotor/lane detection")

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                weather_ID = random.choice(range(0, 15))
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,     # 20
                    NumberOfPedestrians=0,  # 40
                    WeatherId=weather_ID,   # e.g. 2, or random.choice([1, 3, 7, 8, 14])
                    QualityLevel=args.quality_level)
                # MapName = 'Town01'
                settings.randomize_seeds()
                # Weather IDs, roughly: 1 sunny (with shadow), 2 bright cloudy,
                # 3 after rain (with shadow), 4 after rain (ambient light),
                # 5 middle rain, 6 big rain, 7 small rain, 8 dark, 9 cloudy,
                # 10 after rain (cloudy), 11 after rain (sunny), 12/13 rain.

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')  # , PostProcessing='None'
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.3, 0, 2.30)  # 0.3, 0, 1.3
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera2 = Camera('CameraDepth', PostProcessing='Depth')
                camera2.set_image_size(800, 600)
                camera2.set_position(0.30, 0, 2.30)
                settings.add_sensor(camera2)

                camera3 = Camera('CameraSemantics',
                                 PostProcessing='SemanticSegmentation')
                camera3.set_image_size(800, 600)
                camera3.set_position(0.30, 0, 2.30)
                settings.add_sensor(camera3)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=20,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()
                # weather_ID is only chosen in the branch above; give it a
                # placeholder so the logging and filenames below still work.
                weather_ID = -1

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))
            # player_start = 31

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('\rStarting new episode %d...weather: %d, player_start: %d' %
                  (i_episode, weather_ID, player_start))
            client.start_episode(player_start)
            px_l = -1
            py_l = -1
            ######### Parameters for one test
            lc_num = 0
            lc_hold = False
            lc_start_turn = False
            lc_num_2 = 100  # for counting failures in turning
            lc_num_3 = 0
            #############

            if args.writepose:
                pose_txt_obj = open('pose_episode_%d.txt' % (i_episode), 'w')

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                #ZYX LIDAR
                # if True: #not lc_hold:
                #     result = run_cpp(sensor_data['Lidar32'].point_cloud, True)
                #     #print('LP:',LP,'END')

                #     lidar_success = False
                #     if result:
                #         lidar_success = True
                #         pts, kb, hdx, tlx = result

                # print('pts:',pts)
                # print('kb:',kb)
                # print('hdx:',hdx)
                # print('tlx:',tlx)
                if args.autopilot:
                    if measurements.player_measurements.forward_speed > 1:
                        sys.stdout.write(
                            '\r------------------------%d Running' % (frame))
                    else:
                        sys.stdout.write(
                            '\r------------------------%d Stopped' % (frame))
                    sys.stdout.flush()
                else:
                    print('------------------------%d' % (frame))

                #ZYX LIDAR

                if not args.autopilot:
                    image = sensor_data['CameraRGB'].data
                    lane_coef = send_image(image)
                    print("lane_coef: ", lane_coef)

                # Print some of the measurements.
                #print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk and measurements.player_measurements.forward_speed > 1:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            i_episode, weather_ID, name, frame)  # episode
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                if args.writepose:
                    player_measurements = measurements.player_measurements
                    cur_trans = player_measurements.transform.location
                    print('cur_trans: ', cur_trans)
                    cur_rot = player_measurements.transform.rotation
                    print('cur_rot: ', cur_rot)
                    pose_txt_obj.write('%f %f %f ' %
                                       (cur_trans.x, cur_trans.y, cur_trans.z))
                    pose_txt_obj.write(
                        '%f %f %f\n' %
                        (cur_rot.pitch, cur_rot.roll, cur_rot.yaw))

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:
                    player_measurements = measurements.player_measurements
                    pos_x = player_measurements.transform.location.x
                    pos_y = player_measurements.transform.location.y
                    speed = player_measurements.forward_speed * 3.6

                    #Traffic Light
                    # print('TrafficLight:-----------------')
                    # for agent in measurements.non_player_agents:
                    #     if agent.HasField('traffic_light'):
                    #         print(agent.id)
                    #         print(agent.traffic_light.transform)
                    #         print(agent.traffic_light.state)
                    #         print('-----------------')
                    #         break

                    #Traffic Light End

                    if px_l == -1:
                        px_l = pos_x
                    if py_l == -1:
                        py_l = pos_y
                    delta_x = pos_x - px_l
                    delta_y = pos_y - py_l
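                    # delta_x/delta_y track the displacement since the previous
                    # frame; in the active code path they are only referenced
                    # by the commented-out steering law further below.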
                    st = 0
                    #if pos_y < 11:
                    #   st = 0.3

                    # if lidar_success and hdx[0]<-2.2:
                    #     lc_num += 1
                    # else:
                    #     lc_num = 0

                    # if lc_hold:
                    #     st = 0.3

                    # if lc_num>2 and not lc_hold:
                    #     lc_hold = True
                    #     st = 0.2

                    # if  abs(lane_coef[2]) > 0.003 and frame > 100:
                    #     lc_num += 1
                    # else:
                    #     lc_num = 0

                    # if lc_num>5 and not lc_start_turn:
                    #     lc_start_turn = True

                    # if lc_start_turn and lane_coef[0] == 0:
                    #     lc_num_2 += 1

                    # if lc_hold:
                    #     st = 0.3

                    # if lc_num_2 > 5 and not lc_hold:
                    #     lc_hold = True
                    #     st = 0.25

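                    # Lane-change state machine (interpretation of the active
                    # path): lc_num counts consecutive frames with a large
                    # curvature term, lc_num_2 is a countdown started once a
                    # turn is detected, and lc_hold latches the steering phase.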
                    if abs(lane_coef[2]) > 0.003 and frame > 300:
                        lc_num += 1
                    else:
                        lc_num = 0

                    if lc_num > 5 and not lc_start_turn:
                        lc_start_turn = True
                        lc_num_2 = 17

                    if lc_start_turn and lc_num_2 > 0:
                        lc_num_2 -= 1

                    if lc_hold:
                        st = 0.3

                    if lc_num_2 == 0 and not lc_hold:
                        lc_hold = True
                        st = 0.25

                    # print('lidar_success:', lidar_success )
                    print('lc_num:', lc_num)
                    print('lc_num_2:', lc_num_2)

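                    # The steering below appears to be a lookahead proportional
                    # law on the combined lane coefficients: a1 ~ slope/heading
                    # term, a2 ~ lateral offset (biased by -0.2), l = lookahead
                    # distance, k = gain, giving st = k * (a1 * l + a2).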
                    if lc_hold and lane_coef[0] != 0:
                        a1 = lane_coef[1] + lane_coef[4]
                        a2 = lane_coef[0] + lane_coef[3] - 0.2
                        l = 5
                        k = 0.08
                        st = k * (a1 * l + a2)
                        print('a1:', a1)
                        print('a2:', a2)

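                    # Longitudinal control: a simple proportional cruise
                    # controller around a 28 km/h target (speed is in km/h
                    # here), braking above the target and throttling below it.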
                    if speed > 28:
                        br = (speed - 28) * 0.1
                        thr = 0
                    else:
                        br = 0
                        thr = (28 - speed) * 0.05 + 0.6
                    # if pos_y > 150:
                    #     thr = 1.6
                    #     br = 0

                    if lc_hold:
                        lc_num_3 += 1
                    print('lc_num_3:', lc_num_3)

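                    # Once the steering-hold phase has lasted more than 185
                    # frames, bring the car to a full stop and end the maneuver.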
                    if lc_num_3 > 185:
                        thr = 0
                        br = 1
                        st = 0
                    # if pos_x > 5:
                    #     st = -delta_y*10 + (2-pos_y)*0.8
                    # if abs(st) < 0.001:
                    #     st = 0
                    # st = (2-pos_y)*0.01
                    print('Steering:', st)
                    client.send_control(
                        #steer=random.uniform(-1.0, 1.0),
                        steer=st,
                        throttle=thr,
                        brake=br,
                        hand_brake=False,
                        reverse=False)
                    px_l = pos_x
                    py_l = pos_y
                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)

            if args.writepose:
                pose_txt_obj.close()

            if not args.autopilot:
                lane_detection.kill()
Exemplo n.º 20
0
class CarlaOperator(Op):
    """Provides an ERDOS interface to the CARLA simulator.

    Args:
        name: A string naming the operator.
        flags: Configuration flags; carla_synchronous_mode controls whether
            the simulator waits for control input from the client.
        camera_setups: A list of (stream_name, camera_type, image_size,
            position) tuples describing the cameras to attach.
        lidar_stream_names: Names of the LIDAR streams to create.
        log_file_name: Optional file for the text logger.
        csv_file_name: Optional file for the CSV logger.
    """
    def __init__(self,
                 name,
                 flags,
                 camera_setups=[],
                 lidar_stream_names=[],
                 log_file_name=None,
                 csv_file_name=None):
        super(CarlaOperator, self).__init__(name)
        self._flags = flags
        self._logger = setup_logging(self.name, log_file_name)
        self._csv_logger = setup_csv_logging(self.name + '-csv', csv_file_name)
        self.message_num = 0
        if self._flags.carla_high_quality:
            quality = 'Epic'
        else:
            quality = 'Low'
        self.settings = CarlaSettings()
        self.settings.set(
            SynchronousMode=self._flags.carla_synchronous_mode,
            SendNonPlayerAgentsInfo=True,
            NumberOfVehicles=self._flags.carla_num_vehicles,
            NumberOfPedestrians=self._flags.carla_num_pedestrians,
            WeatherId=self._flags.carla_weather,
            QualityLevel=quality)
        self.settings.randomize_seeds()
        self.lidar_streams = []
        for (camera_stream_name, camera_type, image_size,
             pos) in camera_setups:
            self.__add_camera(name=camera_stream_name,
                              postprocessing=camera_type,
                              image_size=image_size,
                              position=pos)
        for lidar_stream_name in lidar_stream_names:
            self.__add_lidar(name=lidar_stream_name)
        self.agent_id_map = {}
        self.pedestrian_count = 0

    @staticmethod
    def setup_streams(input_streams, camera_setups, lidar_stream_names):
        input_streams.add_callback(CarlaOperator.update_control)
        camera_streams = [
            DataStream(name=camera,
                       labels={
                           'sensor_type': 'camera',
                           'camera_type': camera_type
                       }) for (camera, camera_type, _, _) in camera_setups
        ]
        lidar_streams = [
            DataStream(name=lidar, labels={'sensor_type': 'lidar'})
            for lidar in lidar_stream_names
        ]
        return [
            DataStream(name='world_transform'),
            DataStream(name='vehicle_pos'),
            DataStream(name='acceleration'),
            DataStream(data_type=Float64, name='forward_speed'),
            DataStream(data_type=Float64, name='vehicle_collisions'),
            DataStream(data_type=Float64, name='pedestrian_collisions'),
            DataStream(data_type=Float64, name='other_collisions'),
            DataStream(data_type=Float64, name='other_lane'),
            DataStream(data_type=Float64, name='offroad'),
            DataStream(name='traffic_lights'),
            DataStream(name='pedestrians'),
            DataStream(name='vehicles'),
            DataStream(name='traffic_signs'),
        ] + camera_streams + lidar_streams

    def __add_camera(self,
                     name,
                     postprocessing,
                     image_size=(800, 600),
                     field_of_view=90.0,
                     position=(0.3, 0, 1.3),
                     rotation_pitch=0,
                     rotation_roll=0,
                     rotation_yaw=0):
        """Adds a camera and a corresponding output stream.

        Args:
            name: A string naming the camera.
            postprocessing: "SceneFinal", "Depth", "SemanticSegmentation".
        """
        camera = Camera(name,
                        PostProcessing=postprocessing,
                        FOV=field_of_view,
                        ImageSizeX=image_size[0],
                        ImageSizeY=image_size[1],
                        PositionX=position[0],
                        PositionY=position[1],
                        PositionZ=position[2],
                        RotationPitch=rotation_pitch,
                        RotationRoll=rotation_roll,
                        RotationYaw=rotation_yaw)

        self.settings.add_sensor(camera)

    def __add_lidar(self,
                    name,
                    channels=32,
                    max_range=50,
                    points_per_second=100000,
                    rotation_frequency=10,
                    upper_fov_limit=10,
                    lower_fov_limit=-30,
                    position=(0, 0, 1.4),
                    rotation_pitch=0,
                    rotation_yaw=0,
                    rotation_roll=0):
        """Adds a LIDAR sensor and a corresponding output stream.

        Args:
            name: A string naming the camera.
        """
        lidar = Lidar(name,
                      Channels=channels,
                      Range=max_range,
                      PointsPerSecond=points_per_second,
                      RotationFrequency=rotation_frequency,
                      UpperFovLimit=upper_fov_limit,
                      LowerFovLimit=lower_fov_limit,
                      PositionX=position[0],
                      PositionY=position[1],
                      PositionZ=position[2],
                      RotationPitch=rotation_pitch,
                      RotationYaw=rotation_yaw,
                      RotationRoll=rotation_roll)

        self.settings.add_sensor(lidar)
        output_stream = DataStream(name=name, labels={"sensor_type": "lidar"})
        self.lidar_streams.append(output_stream)

    def read_carla_data(self):
        read_start_time = time.time()
        measurements, sensor_data = self.client.read_data()
        measure_time = time.time()

        self._logger.info(
            'Got readings for game time {} and platform time {}'.format(
                measurements.game_timestamp, measurements.platform_timestamp))

        # Send measurements
        player_measurements = measurements.player_measurements
        vehicle_pos = ((player_measurements.transform.location.x,
                        player_measurements.transform.location.y,
                        player_measurements.transform.location.z),
                       (player_measurements.transform.orientation.x,
                        player_measurements.transform.orientation.y,
                        player_measurements.transform.orientation.z))

        world_transform = Transform(player_measurements.transform)

        timestamp = Timestamp(
            coordinates=[measurements.game_timestamp, self.message_num])
        self.message_num += 1
        ray.register_custom_serializer(Message, use_pickle=True)
        ray.register_custom_serializer(WatermarkMessage, use_pickle=True)
        watermark = WatermarkMessage(timestamp)
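        # Each output stream below sends the data message followed by a
        # watermark, so downstream ERDOS operators can advance their logical
        # time for this (game_timestamp, message_num) coordinate.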
        self.get_output_stream('world_transform').send(
            Message(world_transform, timestamp))
        self.get_output_stream('world_transform').send(watermark)
        self.get_output_stream('vehicle_pos').send(
            Message(vehicle_pos, timestamp))
        self.get_output_stream('vehicle_pos').send(watermark)
        acceleration = (player_measurements.acceleration.x,
                        player_measurements.acceleration.y,
                        player_measurements.acceleration.z)
        self.get_output_stream('acceleration').send(
            Message(acceleration, timestamp))
        self.get_output_stream('acceleration').send(watermark)
        self.get_output_stream('forward_speed').send(
            Message(player_measurements.forward_speed, timestamp))
        self.get_output_stream('forward_speed').send(watermark)
        self.get_output_stream('vehicle_collisions').send(
            Message(player_measurements.collision_vehicles, timestamp))
        self.get_output_stream('vehicle_collisions').send(watermark)
        self.get_output_stream('pedestrian_collisions').send(
            Message(player_measurements.collision_pedestrians, timestamp))
        self.get_output_stream('pedestrian_collisions').send(watermark)
        self.get_output_stream('other_collisions').send(
            Message(player_measurements.collision_other, timestamp))
        self.get_output_stream('other_collisions').send(watermark)
        self.get_output_stream('other_lane').send(
            Message(player_measurements.intersection_otherlane, timestamp))
        self.get_output_stream('other_lane').send(watermark)
        self.get_output_stream('offroad').send(
            Message(player_measurements.intersection_offroad, timestamp))
        self.get_output_stream('offroad').send(watermark)

        vehicles = []
        pedestrians = []
        traffic_lights = []
        speed_limit_signs = []

        for agent in measurements.non_player_agents:
            if agent.HasField('vehicle'):
                pos = messages.Transform(agent.vehicle.transform)
                bb = messages.BoundingBox(agent.vehicle.bounding_box)
                forward_speed = agent.vehicle.forward_speed
                vehicle = messages.Vehicle(pos, bb, forward_speed)
                vehicles.append(vehicle)
            elif agent.HasField('pedestrian'):
                if not self.agent_id_map.get(agent.id):
                    self.pedestrian_count += 1
                    self.agent_id_map[agent.id] = self.pedestrian_count

                pedestrian_index = self.agent_id_map[agent.id]
                pos = messages.Transform(agent.pedestrian.transform)
                bb = messages.BoundingBox(agent.pedestrian.bounding_box)
                forward_speed = agent.pedestrian.forward_speed
                pedestrian = messages.Pedestrian(pedestrian_index, pos, bb,
                                                 forward_speed)
                pedestrians.append(pedestrian)
            elif agent.HasField('traffic_light'):
                transform = messages.Transform(agent.traffic_light.transform)
                traffic_light = messages.TrafficLight(
                    transform, agent.traffic_light.state)
                traffic_lights.append(traffic_light)
            elif agent.HasField('speed_limit_sign'):
                transform = messages.Transform(
                    agent.speed_limit_sign.transform)
                speed_sign = messages.SpeedLimitSign(
                    transform, agent.speed_limit_sign.speed_limit)
                speed_limit_signs.append(speed_sign)

        vehicles_msg = Message(vehicles, timestamp)
        self.get_output_stream('vehicles').send(vehicles_msg)
        self.get_output_stream('vehicles').send(watermark)
        pedestrians_msg = Message(pedestrians, timestamp)
        self.get_output_stream('pedestrians').send(pedestrians_msg)
        self.get_output_stream('pedestrians').send(watermark)
        traffic_lights_msg = Message(traffic_lights, timestamp)
        self.get_output_stream('traffic_lights').send(traffic_lights_msg)
        self.get_output_stream('traffic_lights').send(watermark)
        traffic_signs_msg = Message(speed_limit_signs, timestamp)
        self.get_output_stream('traffic_signs').send(traffic_signs_msg)
        self.get_output_stream('traffic_signs').send(watermark)

        # Send sensor data
        for name, measurement in sensor_data.items():
            data_stream = self.get_output_stream(name)
            if data_stream.get_label('camera_type') == 'SceneFinal':
                # Transform the Carla RGB images to BGR.
                data_stream.send(
                    Message(
                        pylot_utils.bgra_to_bgr(to_bgra_array(measurement)),
                        timestamp))
            else:
                data_stream.send(Message(measurement, timestamp))
            data_stream.send(watermark)

        self.client.send_control(**self.control)
        end_time = time.time()

        measurement_runtime = (measure_time - read_start_time) * 1000
        total_runtime = (end_time - read_start_time) * 1000
        self._logger.info('Carla measurement time {}; total time {}'.format(
            measurement_runtime, total_runtime))
        self._csv_logger.info('{},{},{},{}'.format(time_epoch_ms(), self.name,
                                                   measurement_runtime,
                                                   total_runtime))

    def read_data_at_frequency(self):
        period = 1.0 / self._flags.carla_step_frequency
        trigger_at = time.time() + period
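        # Sleep until the next trigger time; advancing trigger_at by a fixed
        # period (rather than sleeping a fixed amount after each read) keeps
        # the long-run rate at carla_step_frequency even when a read runs long.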
        while True:
            time_until_trigger = trigger_at - time.time()
            if time_until_trigger > 0:
                time.sleep(time_until_trigger)
            else:
                self._logger.error(
                    'Cannot read Carla data at frequency {}'.format(
                        self._flags.carla_step_frequency))
            self.read_carla_data()
            trigger_at += period

    def update_control(self, msg):
        """Updates the control dict"""
        self.control.update(msg.data)

    def execute(self):
        # Subscribe to control streams
        self.control = {
            'steer': 0.0,
            'throttle': 0.0,
            'brake': 0.0,
            'hand_brake': False,
            'reverse': False
        }
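        # These keys are forwarded verbatim via client.send_control(**self.control),
        # so they must match the keyword arguments the CARLA client expects
        # (steer, throttle, brake, hand_brake, reverse).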
        self.client = CarlaClient(self._flags.carla_host,
                                  self._flags.carla_port,
                                  timeout=10)
        self.client.connect()
        scene = self.client.load_settings(self.settings)

        # Choose one player start at random.
        number_of_player_starts = len(scene.player_start_spots)
        player_start = self._flags.carla_start_player_num
        if self._flags.carla_random_player_start:
            player_start = np.random.randint(0, number_of_player_starts)

        self.client.start_episode(player_start)

        self.read_data_at_frequency()
        self.spin()
def run_carla_clients(args):
    filename = '_images_repeatability/server{:d}/{:0>6d}.png'
    with make_carla_client(args.host1, args.port1) as client1:
        logging.info('1st client connected')
        with make_carla_client(args.host2, args.port2) as client2:
            logging.info('2nd client connected')

            settings = CarlaSettings()
            settings.set(SynchronousMode=True,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=50,
                         NumberOfPedestrians=50,
                         WeatherId=random.choice([1, 3, 7, 8, 14]))
            settings.randomize_seeds()

            if args.images_to_disk:
                camera = Camera('DefaultCamera')
                camera.set_image_size(800, 600)
                settings.add_sensor(camera)

            scene1 = client1.load_settings(settings)
            scene2 = client2.load_settings(settings)

            number_of_player_starts = len(scene1.player_start_spots)
            assert number_of_player_starts == len(scene2.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))
            logging.info(
                'start episode at %d/%d player start (run forever, press ctrl+c to cancel)',
                player_start, number_of_player_starts)

            client1.start_episode(player_start)
            client2.start_episode(player_start)

            frame = 0
            while True:
                frame += 1

                meas1, sensor_data1 = client1.read_data()
                meas2, sensor_data2 = client2.read_data()

                player1 = meas1.player_measurements
                player2 = meas2.player_measurements

                images1 = [
                    x for x in sensor_data1.values() if isinstance(x, Image)
                ]
                images2 = [
                    x for x in sensor_data2.values() if isinstance(x, Image)
                ]

                control1 = player1.autopilot_control
                control2 = player2.autopilot_control

                try:
                    assert len(images1) == len(images2)
                    assert len(meas1.non_player_agents) == len(
                        meas2.non_player_agents)
                    assert player1.transform.location.x == player2.transform.location.x
                    assert player1.transform.location.y == player2.transform.location.y
                    assert player1.transform.location.z == player2.transform.location.z
                    assert control1.steer == control2.steer
                    assert control1.throttle == control2.throttle
                    assert control1.brake == control2.brake
                    assert control1.hand_brake == control2.hand_brake
                    assert control1.reverse == control2.reverse
                except AssertionError:
                    logging.exception('assertion failed')

                if args.images_to_disk:
                    assert len(images1) == 1
                    images1[0].save_to_disk(filename.format(1, frame))
                    images2[0].save_to_disk(filename.format(2, frame))

                client1.send_control(control1)
                client2.send_control(control2)