Example #1
    def __add_lidar(self,
                    name,
                    channels=32,
                    max_range=50,
                    points_per_second=100000,
                    rotation_frequency=10,
                    upper_fov_limit=10,
                    lower_fov_limit=-30,
                    position=(0, 0, 1.4),
                    rotation_pitch=0,
                    rotation_yaw=0,
                    rotation_roll=0):
        """Adds a LIDAR sensor and a corresponding output stream.

        Args:
            name: A string naming the LIDAR sensor.
        """
        lidar = Lidar(name,
                      Channels=channels,
                      Range=max_range,
                      PointsPerSecond=points_per_second,
                      RotationFrequency=rotation_frequency,
                      UpperFovLimit=upper_fov_limit,
                      LowerFovLimit=lower_fov_limit,
                      PositionX=position[0],
                      PositionY=position[1],
                      PositionZ=position[2],
                      RotationPitch=rotation_pitch,
                      RotationYaw=rotation_yaw,
                      RotationRoll=rotation_roll)

        self.settings.add_sensor(lidar)
        output_stream = DataStream(name=name, labels={"sensor_type": "lidar"})
        self.lidar_streams.append(output_stream)
Example #2
    def run(self):
        settings = CarlaSettings(QualityLevel='Low')
        settings.add_sensor(Lidar('DefaultLidar'))
        settings.add_sensor(Camera('DefaultCamera'))
        settings.add_sensor(Camera('DefaultDepth', PostProcessing='Depth'))
        settings.add_sensor(
            Camera('DefaultSemSeg', PostProcessing='SemanticSegmentation'))
        self.run_carla_client(settings, 3, 200)
Example #3
def create_lidar(lidar_position=(0., 0., 2.5)):
    log.info("Adding Lidar")
    lidar = Lidar('Lidar32')
    lidar.set_position(*lidar_position)
    lidar.set_rotation(0, 0, 0)
    lidar.set(
        Channels=32,
        Range=50,
        PointsPerSecond=100000,
        RotationFrequency=10,
        UpperFovLimit=10,
        LowerFovLimit=-30)
    return lidar
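
A minimal usage sketch for the helper above (not part of the original example): it assumes the legacy CARLA 0.8.x Python client, where CarlaSettings lives in carla.settings and accepts keyword settings, as Example #2 shows.

from carla.settings import CarlaSettings

# Hedged sketch: attach the LIDAR returned by create_lidar to a settings
# object, exactly as the add_sensor calls elsewhere in these examples do.
settings = CarlaSettings(SynchronousMode=True)
settings.add_sensor(create_lidar(lidar_position=(0., 0., 2.5)))
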
Example #4
def carla_init(client):
    """

    :param client:
    :return:
    """
    settings = CarlaSettings()
    settings.set(SynchronousMode=True,
                 SendNonPlayerAgentsInfo=True,
                 NumberOfVehicles=20,
                 NumberOfPedestrians=40,
                 WeatherId=random.choice([1, 3, 7, 8, 14]),
                 QualityLevel='Epic')

    # CAMERA
    camera0 = Camera('CameraRGB')
    # Set image resolution in pixels.
    camera0.set_image_size(800, 600)
    # Set its position relative to the car in meters.
    camera0.set_position(0.30, 0, 1.30)
    settings.add_sensor(camera0)

    # Let's add another camera producing ground-truth depth.
    camera1 = Camera('CameraDepth', PostProcessing='Depth')
    camera1.set_image_size(800, 600)
    camera1.set_position(0.30, 0, 1.30)
    settings.add_sensor(camera1)

    # LIDAR
    lidar = Lidar('Lidar32')
    lidar.set_position(0, 0, 2.50)
    lidar.set_rotation(0, 0, 0)
    lidar.set(Channels=32,
              Range=50,
              PointsPerSecond=100000,
              RotationFrequency=10,
              UpperFovLimit=10,
              LowerFovLimit=-30)
    settings.add_sensor(lidar)

    scene = client.load_settings(settings)

    # define the starting point of the agent
    player_start = 140
    client.start_episode(player_start)
    print('Starting new episode at %r, %d...' % (scene.map_name, player_start))
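
A short, hedged driver sketch for carla_init (not part of the original example). It assumes the 0.8.x client's make_carla_client context manager from carla.client; the host and port values are placeholders.

from carla.client import make_carla_client

def main(host='localhost', port=2000):
    # make_carla_client cleans the connection up on exit, as the other
    # examples note.
    with make_carla_client(host, port) as client:
        carla_init(client)
        # In synchronous mode each read_data() must be answered with a
        # send_control() call before the server advances the simulation.
        measurements, sensor_data = client.read_data()
        client.send_control(measurements.player_measurements.autopilot_control)
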
Example #5
    def __add_lidar(self, lidar_setup):
        """Adds a LIDAR sensor and a corresponding output stream.

        Args:
            lidar_setup: A LidarSetup object describing the sensor configuration.
        """
        transform = lidar_setup.get_transform()
        lidar = Lidar(lidar_setup.name,
                      Channels=lidar_setup.channels,
                      Range=lidar_setup.range,
                      PointsPerSecond=lidar_setup.points_per_second,
                      RotationFrequency=lidar_setup.rotation_frequency,
                      UpperFovLimit=lidar_setup.upper_fov,
                      LowerFovLimit=lidar_setup.lower_fov,
                      PositionX=transform.location.x,
                      PositionY=transform.location.y,
                      PositionZ=transform.location.z,
                      RotationPitch=transform.rotation.pitch,
                      RotationYaw=transform.rotation.yaw,
                      RotationRoll=transform.rotation.roll)

        self._settings.add_sensor(lidar)
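
The LidarSetup type used above is not defined in this snippet. The following is a hypothetical, minimal stand-in exposing only the attributes __add_lidar reads; the real class in the originating project may differ.

class LidarSetup(object):
    """Hypothetical container for the LIDAR parameters read by __add_lidar."""

    def __init__(self, name, transform, channels=32, lidar_range=50,
                 points_per_second=100000, rotation_frequency=10,
                 upper_fov=10, lower_fov=-30):
        self.name = name
        # Expected to expose .location (x, y, z) and .rotation
        # (pitch, yaw, roll), matching how get_transform() is used above.
        self._transform = transform
        self.channels = channels
        self.range = lidar_range
        self.points_per_second = points_per_second
        self.rotation_frequency = rotation_frequency
        self.upper_fov = upper_fov
        self.lower_fov = lower_fov

    def get_transform(self):
        return self._transform
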
Example #6
def run_carla_client(args):

    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        while True:

            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=0,
                NumberOfPedestrians=0,
                WeatherId=3,  #1, 3, 7, 8, 14
                QualityLevel=args.quality_level)
            settings.randomize_seeds()

            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = sensor.Camera('CameraRGB', PostProcessing='SceneFinal')
            # Set image resolution in pixels.
            w = 800
            h = 600
            camera0.set_image_size(w, h)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 4)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            depth_camera = sensor.Camera('CameraDepth', PostProcessing='Depth')
            depth_camera.set_image_size(800, 600)
            depth_camera.set_position(x=0.30, y=0, z=4)
            settings.add_sensor(depth_camera)

            if args.lidar:
                lidar = Lidar('Lidar32')
                lidar.set_position(0, 0, 2.50)
                lidar.set_rotation(0, 0, 0)
                lidar.set(Channels=32,
                          Range=50,
                          PointsPerSecond=100000,
                          RotationFrequency=10,
                          UpperFovLimit=10,
                          LowerFovLimit=-30)
                settings.add_sensor(lidar)

            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)
            action_model = roughPPO.actor_model((h, w, 3), (3, 1), (20, 2))
            critic_model = roughPPO.critic_model((h, w, 3), (3, 1), (20, 2),
                                                 (2, 1))
            actor_action = [0, 0]
            critic_score = 0
            ppo_check = 0
            # Iterate every frame in the episode.
            ppo_iter_count = 0
            while True:

                # Read the data produced by the server this frame.

                measurements, sensor_data = client.read_data()
                # print_measurements(measurements)
                main_image = sensor_data.get("CameraRGB", None)

                image_array = image_converter.to_rgb_array(main_image)

                point_cloud = image_converter.depth_to_local_point_cloud(
                    sensor_data['CameraDepth'], image_array, max_depth=1000)

                image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
                cv2.imshow("FrontCam View", image_array)
                reverse = 0
                # Read the shadow flag up front so the except handler below can
                # use it even if an exception is raised before it is refreshed
                # inside the try block.
                check = SDC_data_store.shadow_check
                try:
                    steer_val = 0

                    bez_mask, coord_trajectory, trajectory, world_x_left, world_x_right, world_z_left, world_z_right = floodfill(
                        image_array, point_cloud)

                    track_point_yaw = trajectory[-5]
                    yaw_error = np.arctan(
                        (0 - track_point_yaw[0]) / (track_point_yaw[1]))
                    state_update_long(trajectory)
                    state_update_lat(world_x_left, world_x_right, yaw_error)

                    if ppo_check == 1:
                        reward_score = roughPPO.reward(actor_action,
                                                       world_x_left,
                                                       world_x_right)
                        if len(SDC_data_store.throttle_queue) < 50:
                            SDC_data_store.throttle_queue.append(
                                actor_action[0])
                            SDC_data_store.steer_queue.append(actor_action[1])
                            SDC_data_store.rewards_queue.append(reward_score)
                            SDC_data_store.critic_score.append(critic_score)
                        else:
                            print("IDHAR!!!!!!!!!!")
                            SDC_data_store.throttle_queue.popleft()
                            SDC_data_store.throttle_queue.append(
                                actor_action[0])
                            SDC_data_store.steer_queue.popleft()
                            SDC_data_store.steer_queue.append(actor_action[1])
                            SDC_data_store.rewards_queue.popleft()
                            SDC_data_store.rewards_queue.append(reward_score)
                            SDC_data_store.critic_score.popleft()
                            SDC_data_store.critic_score.append(critic_score)

                        if (len(SDC_data_store.rewards_queue) > 1):
                            returns, advs = roughPPO.advantages(
                                list(SDC_data_store.critic_score),
                                list(SDC_data_store.rewards_queue))
                            loss = roughPPO.ppo_loss(
                                advs, SDC_data_store.rewards_queue,
                                SDC_data_store.critic_score)
                            action_model.compile(optimizer=Adam(lr=1e-3),
                                                 loss=loss)
                            critic_model.compile(optimizer=Adam(lr=1e-3),
                                                 loss='mse')
                            print("LOSS : " + str(loss))
                        ppo_check = 0
                    if ppo_iter_count % 20 == 0 and ppo_iter_count > 0 and ppo_check == 0:
                        ppo_check = 1
                        action_output, critic_output = ppo_input(
                            measurements, trajectory, image_array,
                            action_model, critic_model)
                        actor_action = action_output[0]
                        critic_score = critic_output[0][0]
                        throttle_a = actor_action[0]
                        steer_val = actor_action[1]

                    else:
                        throttle_a = PID_throttle(trajectory, reverse)
                        cv2.imshow("bez", bez_mask)
                        steer_val = PID_steer(trajectory, reverse,
                                              world_x_left, world_x_right)

                    check = SDC_data_store.shadow_check

                    client.send_control(steer=steer_val * check,
                                        throttle=throttle_a * check + 0.5 *
                                        (1 - check),
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)
                    SDC_data_store.turn_value = steer_val
                except Exception as e:
                    print(e)

                    client.send_control(steer=SDC_data_store.turn_value *
                                        check,
                                        throttle=1,
                                        brake=0.85,
                                        hand_brake=True,
                                        reverse=False)

                cv2.waitKey(1)

                if SDC_data_store.count % 10 == 0:
                    print("frame: ", SDC_data_store.count)

                # if SDC_data_store.count%10==0:
                #     SDC_data_store.sum_cte = 0
                SDC_data_store.count += 1
                print("*********\n")
                ppo_iter_count += 1
Example #7
def run_carla_client(args):
    # Here we will run 5 episodes, each split into 40 cuts of 200 frames.
    number_of_episodes = 5
    cut_per_episode = 40
    frames_per_cut = 200


    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        for episode in range(0, number_of_episodes):
            print("input any key to continue...")
            start = input()
            # each episode dir store a set of traindata.  if dir not existed, then make it
            pathdir = '/home/kadn/dataTrain/episode_{:0>3}'.format(episode)
            mkdir(pathdir)

            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=20,
                NumberOfPedestrians=40,
                # WeatherId=random.choice([1, 3, 7, 8, 14]),
                WeatherId=1,
                QualityLevel=args.quality_level)
            settings.randomize_seeds()
            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(88, 200)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            camera1 = Camera('CameraDepth', PostProcessing='Depth')
            camera1.set_image_size(200, 88)
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

            camera2 = Camera('CameraSemSeg', PostProcessing='SemanticSegmentation')
            camera2.set_image_size(88, 200)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera2)
            # if args.lidar:
            lidar = Lidar('Lidar32')
            lidar.set_position(0, 0, 2.50)
            lidar.set_rotation(0, 0, 0)
            lidar.set(
                Channels=0,
                Range=30,
                PointsPerSecond=200000,
                RotationFrequency=10,
                UpperFovLimit=0,
                LowerFovLimit=0)
            settings.add_sensor(lidar)

            # else:
            #
            #     # Alternatively, we can load these settings from a file.
            #     with open(args.settings_filepath, 'r') as fp:
            #         settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))
            player_start = 1

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            # Start a new episode.
            client.start_episode(player_start)

            for cut_index in range(0, cut_per_episode):
                num = fileCounter(pathdir)
                filename = "data_{:0>6}.h5".format(num)
                filepath = pathdir + '/' + filename
                f = h5py.File(filepath, "w")
                rgb_file = f.create_dataset("rgb", (200, 88, 200), np.uint8)
                seg_file = f.create_dataset("seg", (200, 88, 200), np.uint8)
                lidar_file = f.create_dataset('lidar', (200, 200, 200), np.uint8)
                startendpoint = f.create_dataset('startend', (1, 2), np.float32)
                targets_file = f.create_dataset("targets", (200, 28), np.float32)
                index_file = 0

                # Iterate every frame in the episode.
                for frame in range(0, frames_per_cut):
                    # Read the data produced by the server this frame.
                    measurements, sensor_data = client.read_data()

                    # get data and store
                    sensors, targets_data = record_train_data(measurements, sensor_data)
                    rgb_file[:, :, index_file] = sensors['rgb']
                    seg_file[:, :, index_file] = sensors['seg']
                    lidar_file[:, :, index_file] = sensors['lidar']
                    targets_file[index_file, :] = targets_data
                    index_file += 1

                    # We can access the encoded data of a given image as numpy
                    # array using its "data" property. For instance, to get the
                    # depth value (normalized) at pixel X, Y
                    #
                    #     depth_array = sensor_data['CameraDepth'].data
                    #     value_at_pixel = depth_array[Y, X]
                    #

                    # Now we have to send the instructions to control the vehicle.
                    # If we are in synchronous mode the server will pause the
                    # simulation until we send this control.

                    if args.autopilot:

                        client.send_control(
                            steer=0,
                            throttle=0.8,
                            brake=0.0,
                            hand_brake=False,
                            reverse=False)

                    else:

                        # Together with the measurements, the server has sent the
                        # control that the in-game autopilot would do this frame. We
                        # can enable autopilot by sending back this control to the
                        # server. We can modify it if wanted, here for instance we
                        # will add some noise to the steer.

                        control = measurements.player_measurements.autopilot_control
                        control.steer += random.uniform(-0.05, 0.05)
                        client.send_control(control)
Example #8
def run_carla_client(pub, host, port):
    print('Attempt to have CarlaClient connected...')
    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        settings = CarlaSettings()
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=False,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=random.choice([1, 3, 7, 8, 14]))
        settings.randomize_seeds()

        lidar1_name = 'LidarMain32B'
        lidar1 = Lidar(lidar1_name)
        lidar1.set(Channels=32,
                   Range=200,
                   PointsPerSecond=640000,
                   RotationFrequency=10,
                   UpperFovLimit=15.0,
                   LowerFovLimit=-25.0,
                   GaussianNoise=3.0,
                   DropOutPattern=0.0,
                   LidarType=1,
                   HorizonRange=360.0,
                   DebugFlag=0)
        lidar1.set_position(x=0, y=0, z=2.50)
        lidar1.set_rotation(pitch=0, yaw=0, roll=0)
        settings.add_sensor(lidar1)

        lidar2_name = 'LidarSub16L'
        lidar2 = Lidar(lidar2_name)
        lidar2.set(Channels=16,
                   Range=150,
                   PointsPerSecond=80000,
                   RotationFrequency=10,
                   UpperFovLimit=15.0,
                   LowerFovLimit=-15.0,
                   GaussianNoise=0.02,
                   DropOutPattern=1.0,
                   LidarType=2)
        lidar2.set_position(x=0, y=0.4, z=2.20)
        lidar2.set_rotation(pitch=0, yaw=0, roll=45)
        settings.add_sensor(lidar2)

        lidar3_name = 'LidarSub16R'
        lidar3 = Lidar(lidar3_name)
        lidar3.set(Channels=16,
                   Range=150,
                   PointsPerSecond=80000,
                   RotationFrequency=10,
                   UpperFovLimit=15.0,
                   LowerFovLimit=-15.0,
                   GaussianNoise=0.02,
                   DropOutPattern=1.0,
                   LidarType=2)
        lidar3.set_position(x=0, y=-0.4, z=2.20)
        lidar3.set_rotation(pitch=0, yaw=0, roll=-45)
        settings.add_sensor(lidar3)

        lidar4_name = 'LidarMain32B_Ref'
        lidar4 = Lidar(lidar4_name)
        lidar4.set(Channels=32,
                   Range=200,
                   PointsPerSecond=640000,
                   RotationFrequency=10,
                   UpperFovLimit=15.0,
                   LowerFovLimit=-25.0,
                   GaussianNoise=0.0,
                   DropOutPattern=0.0,
                   LidarType=1,
                   HorizonRange=360.0,
                   DebugFlag=0)
        lidar4.set_position(x=0, y=0, z=2.50)
        lidar4.set_rotation(pitch=0, yaw=0, roll=0)
        settings.add_sensor(lidar4)

        lidar5_name = 'LidarMEMS'
        lidar5 = Lidar(lidar5_name)
        lidar5.set(Channels=100,
                   Range=200,
                   PointsPerSecond=690000,
                   RotationFrequency=10,
                   UpperFovLimit=10.0,
                   LowerFovLimit=-10.0,
                   GaussianNoise=3.0,
                   DropOutPattern=1.0,
                   LidarType=2,
                   HorizonRange=63.0,
                   DebugFlag=0)
        lidar5.set_position(x=0, y=0, z=2.0)
        lidar5.set_rotation(pitch=0, yaw=0, roll=0)
        settings.add_sensor(lidar5)

        client.load_settings(settings)
        client.start_episode(0)  # Start at location index id '0'

        # Iterate every frame in the episode except for the first one.
        frame = 0
        while True:
            frame += 1
            measurements, sensor_data = client.read_data()

            RSlidar32M_points = sensor_data[lidar1_name].point_cloud.array
            RSlidar32M_points_msg = PointCloud_Gen(RSlidar32M_points)
            pub['RSlidar32M'].publish(RSlidar32M_points_msg)

            RSlidar32Ms_points = sensor_data[lidar4_name].point_cloud.array
            RSlidar32Ms_points_msg = PointCloud_Gen(RSlidar32Ms_points)
            pub['RSlidar32M_Ref'].publish(RSlidar32Ms_points_msg)

            RSlidarMEMS_points = sensor_data[lidar5_name].point_cloud.array
            RSlidarMEMS_points_msg = PointCloud_Gen(RSlidarMEMS_points)
            pub['RSlidarMEMS'].publish(RSlidarMEMS_points_msg)

            RSlidar16L_points = sensor_data[lidar2_name].point_cloud.array
            RSlidar16L_points_msg = PointCloud_Gen(RSlidar16L_points)
            pub['RSlidar16L'].publish(RSlidar16L_points_msg)

            RSlidar16R_points = sensor_data[lidar3_name].point_cloud.array
            RSlidar16R_points_msg = PointCloud_Gen(RSlidar16R_points)
            pub['RSlidar16R'].publish(RSlidar16R_points_msg)

            rospy.loginfo("Send {} frame pointcloud ".format(frame))
            client.send_control(
                measurements.player_measurements.autopilot_control)
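
PointCloud_Gen is used above but not defined in this snippet. A hypothetical implementation, assuming ROS 1 with sensor_msgs available and that each CARLA point_cloud.array is an N x 3 array of x, y, z values, might look like this; the frame id is a placeholder.

import rospy
from std_msgs.msg import Header
from sensor_msgs import point_cloud2

def PointCloud_Gen(points, frame_id='velodyne'):
    """Wrap an N x 3 array of points into a sensor_msgs/PointCloud2 message."""
    header = Header()
    header.stamp = rospy.Time.now()
    header.frame_id = frame_id
    # create_cloud_xyz32 builds a PointCloud2 from an iterable of (x, y, z).
    return point_cloud2.create_cloud_xyz32(header, points)
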
Example #9
def run_carla_client(args):
    # Here we run a single long episode of frames_per_episode frames.
    # number_of_episodes = 3
    frames_per_episode = 10000

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        # for episode in range(0, number_of_episodes):
        # Start a new episode.

        if args.settings_filepath is None:

            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=0,  # 20
                NumberOfPedestrians=0,  # 40
                WeatherId=1,  # random.choice([1, 3, 7, 8, 14]),
                QualityLevel=args.quality_level)
            settings.randomize_seeds()

            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(800, 600)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            camera1 = Camera('CameraDepth', PostProcessing='Depth')
            camera1.set_image_size(800, 600)
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

            if args.lidar:
                lidar = Lidar('Lidar32')
                lidar.set_position(0, 0, 2.50)
                lidar.set_rotation(0, 0, 0)
                lidar.set(Channels=32,
                          Range=50,
                          PointsPerSecond=100000,
                          RotationFrequency=10,
                          UpperFovLimit=10,
                          LowerFovLimit=-30)
                settings.add_sensor(lidar)

        else:

            # Alternatively, we can load these settings from a file.
            with open(args.settings_filepath, 'r') as fp:
                settings = fp.read()

        # Now we load these settings into the server. The server replies
        # with a scene description containing the available start spots for
        # the player. Here we can provide a CarlaSettings object or a
        # CarlaSettings.ini file as string.
        scene = client.load_settings(settings)

        # Choose one player start at random.
        # number_of_player_starts = len(scene.player_start_spots)
        # player_start = random.randint(0, max(0, number_of_player_starts - 1))
        player_start = 31

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print('Starting new episode...')
        client.start_episode(player_start)
        px_l = -1
        py_l = -1

        if args.writepose:
            pose_txt_obj = open('pose.txt', 'w')

        # Iterate every frame in the episode.
        for frame in range(0, frames_per_episode):

            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Print some of the measurements.
            #print_measurements(measurements)

            # Save the images to disk if requested.
            if args.save_images_to_disk:
                for name, measurement in sensor_data.items():
                    filename = args.out_filename_format.format(
                        0, name, frame)  # episode
                    measurement.save_to_disk(filename)

            # We can access the encoded data of a given image as numpy
            # array using its "data" property. For instance, to get the
            # depth value (normalized) at pixel X, Y
            #
            #     depth_array = sensor_data['CameraDepth'].data
            #     value_at_pixel = depth_array[Y, X]
            #

            if args.writepose:
                player_measurements = measurements.player_measurements
                cur_trans = player_measurements.transform.location
                print('cur_trans: ', cur_trans)
                cur_rot = player_measurements.transform.rotation
                print('cur_rot: ', cur_rot)
                pose_txt_obj.write('%f %f %f ' %
                                   (cur_trans.x, cur_trans.y, cur_trans.z))
                pose_txt_obj.write('%f %f %f\n' %
                                   (cur_rot.pitch, cur_rot.roll, cur_rot.yaw))

            # Now we have to send the instructions to control the vehicle.
            # If we are in synchronous mode the server will pause the
            # simulation until we send this control.

            if not args.autopilot:
                player_measurements = measurements.player_measurements
                pos_x = player_measurements.transform.location.x
                pos_y = player_measurements.transform.location.y
                speed = player_measurements.forward_speed * 3.6

                #Traffic Light
                # print('TrafficLight:-----------------')
                # for agent in measurements.non_player_agents:
                #     if agent.HasField('traffic_light'):
                #         print(agent.id)
                #         print(agent.traffic_light.transform)
                #         print(agent.traffic_light.state)
                #         print('-----------------')
                #         break

                #Traffic Light End

                if px_l == -1:
                    px_l = pos_x
                if py_l == -1:
                    py_l = pos_y
                delta_x = pos_x - px_l
                delta_y = pos_y - py_l
                st = 0
                if pos_y < 11:
                    st = 0.3
                if speed > 28:
                    br = (speed - 28) * 0.1
                    thr = 0
                else:
                    br = 0
                    thr = (28 - speed) * 0.05 + 0.6
                if pos_y > 150:
                    thr = 1
                    br = 0
                if pos_x > 5:
                    #if pos_y < 11:
                    st = -delta_y * 10 + (2 - pos_y) * 0.8
                if abs(st) < 0.001:
                    st = 0
                    #st = (2-pos_y)*0.01
                print('Steering:', st)
                client.send_control(
                    #steer=random.uniform(-1.0, 1.0),
                    steer=st,
                    throttle=thr,
                    brake=br,
                    hand_brake=False,
                    reverse=False)
                px_l = pos_x
                py_l = pos_y
            else:

                # Together with the measurements, the server has sent the
                # control that the in-game autopilot would do this frame. We
                # can enable autopilot by sending back this control to the
                # server. We can modify it if wanted, here for instance we
                # will add some noise to the steer.

                control = measurements.player_measurements.autopilot_control
                control.steer += random.uniform(-0.1, 0.1)
                client.send_control(control)

        if args.writepose:
            pose_txt_obj.close()
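
Because the loop above writes one "x y z pitch roll yaw" line per frame to pose.txt, a small reader sketch (not part of the original example, NumPy assumed) can load the trajectory back:

import numpy as np

def load_poses(path='pose.txt'):
    # Each row holds x, y, z, pitch, roll, yaw as space-separated floats,
    # matching the write format used above.
    poses = np.loadtxt(path).reshape(-1, 6)
    positions = poses[:, :3]
    rotations = poses[:, 3:]
    return positions, rotations
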
Example #10
def run_carla_client(args):
    model = lm('/home/ritvik/PythonClient/model_RGB.hdf5')
    # Here we will run 3 episodes with 900 frames each.
    number_of_episodes = 3
    frames_per_episode = 900

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # This camera captures ground-truth semantic segmentation images.
                camera0 = Camera('SemanticSegmentation', PostProcessing='SemanticSegmentation')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)
                
                if args.send_controls_from_nn:
                    for name, measurement in sensor_data.items():
                        img = measurement.return_image()
                        img_array = np.asarray(img)

                        # print("shape of img_array is " + str(np.shape(img_array)))
                                               
                        # dsize is (width, height); the model below expects 240x320x3 input.
                        img_array = cv2.resize(img_array, dsize=(320, 240), interpolation=cv2.INTER_CUBIC)
                        # print("shape of img_array is " + str(np.shape(img_array)))

                        normalized_img = np.zeros((240, 320, 3))
                        normalized_img = cv2.normalize(img_array, normalized_img, 0, 255, cv2.NORM_MINMAX)
                        # print("Shape of norm_img is " + str(np.shape(normalized_img)))


                        normalized_img = normalized_img.reshape(1, 240, 320, 3)

                        control = measurements.player_measurements.autopilot_control
                        

                        steer_prediction = float(model.predict(normalized_img, batch_size=1))
                        if abs(steer_prediction) < 50.0:
                            control.steer = steer_prediction
                        else:
                            print("Predicted steer out of range; keeping autopilot steer")

                        print("Steer value is " + str(control.steer))


                       # # control.steer += random.uniform(-0.1, 0.1)
                       #  img_array = np.asarray(img)
                       #  np.reshape(img_array, img_array.shape + (1,))
                       #  control.steer =float(model.predict(img_array[None,:,:,:], batch_size=1))
                       #  print("Control steer is " + str(control.steer))
                       #  if(abs(control.steer)<0.10):
                       #      control.steer = 0.0
                       #  control.throttle = 0.5
                       #  print("Throttle value is " + str(control.throttle))
                        client.send_control(control)
                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if args.autopilot:
                    control = measurements.player_measurements.autopilot_control
                    #control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
                    print(str(control.steer))
Example #11
    def run(self):
        settings = CarlaSettings()
        settings.add_sensor(Lidar('DefaultLidar'))
        self.run_carla_client(settings, 3, 100)
Example #12
def run_carla_client(args):
    # Here we will run 3 episodes with 3000 frames each.
    number_of_episodes = 3
    frames_per_episode = 3000

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(SynchronousMode=True,
                             SendNonPlayerAgentsInfo=True,
                             NumberOfVehicles=20,
                             NumberOfPedestrians=40,
                             WeatherId=random.choice([1, 3, 7, 8, 14]),
                             QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode, name, frame)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
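
print_measurements is called in several of these examples but never defined. A hedged, minimal version that uses only measurement fields already accessed elsewhere in the snippets could look like this:

def print_measurements(measurements):
    # Hypothetical helper; the original project's version may print more.
    player = measurements.player_measurements
    print('pos=({:.1f}, {:.1f}) speed={:.1f} km/h agents={} '
          'collisions veh/ped/other=({:.0f}, {:.0f}, {:.0f})'.format(
              player.transform.location.x,
              player.transform.location.y,
              player.forward_speed * 3.6,
              len(measurements.non_player_agents),
              player.collision_vehicles,
              player.collision_pedestrians,
              player.collision_other))
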
Example #13
def run_carla_client(args):
    # Here we will run args._episode episodes with args._frames frames each.
    number_of_episodes = args._episode
    frames_per_episode = args._frames

    # create the carla client
    with make_carla_client(args.host, args.port, timeout=10000) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):

            if args.settings_filepath is None:

                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=args.synchronous_mode,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=200,
                    NumberOfPedestrians=100,
                    WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(1920, 640)
                # Set its position relative to the car in meters.
                camera0.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera0)

                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(1920, 640)
                camera1.set_position(2.00, 0, 1.30)
                settings.add_sensor(camera1)

                #slows down the simulation too much by processing segmentation before saving
                #camera2 = Camera('CameraSegmentation', PostProcessing='SemanticSegmentation')
                #camera2.set_image_size(1920, 640)
                #camera2.set_position(2.00, 0, 1.30)
                #settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:
                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            scene = client.load_settings(settings)

            # Choose one player start at random.
            #if intersection flag is set start near and facing an intersection
            if args.intersection_start:
                text_file = open(args.intersection_file, "r")
                player_intersection_start = text_file.read().split(' ')
                player_start = int(player_intersection_start[random.randint(0, max(0, len(player_intersection_start) - 1))])
                text_file.close()
                print("Player start index="+str(player_start))
            else:
                number_of_player_starts = len(scene.player_start_spots)
                player_start = random.randint(0, max(0, number_of_player_starts - 1))

            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            # create folders for current episode
            file_loc = args.file_loc_format.format(episode)
            if not os.path.exists(file_loc):
                os.makedirs(file_loc)

            file_loc_tracklets = file_loc + "/tracklets/"
            if not os.path.exists(file_loc_tracklets):
                os.makedirs(file_loc_tracklets)

            file_loc_grid = file_loc + "/gridmap/"
            if not os.path.exists(file_loc_grid):
                os.makedirs(file_loc_grid)

            print('Data saved in %r' % file_loc)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                #discard episode if misbehaviour flag is set and a form of collision is detected
                if args.no_misbehaviour:
                    player_measurements = measurements.player_measurements
                    col_cars = player_measurements.collision_vehicles
                    col_ped = player_measurements.collision_pedestrians
                    col_other = player_measurements.collision_other
                    if col_cars + col_ped + col_other > 0:
                        print("MISBEHAVIOUR DETECTED! Discarding Episode... \n")
                        break

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested. Skip first couple of images due to setup time.
                if args.save_images_to_disk and frame > 19:
                    player_measurements = measurements.player_measurements

                    # process player odometry
                    yaw = ((player_measurements.transform.rotation.yaw - yaw_shift - 180) % 360) - 180

                    #calculate yaw_rate
                    game_time = np.int64(measurements.game_timestamp)
                    time_diff = (game_time - prev_time) / 1000 # ms -> sec
                    prev_time = game_time
                    if time_diff == 0:
                        yaw_rate = 0
                    else:
                        yaw_rate = (180 - abs(abs(yaw - yaw_old) - 180))/time_diff * np.sign(yaw-yaw_old)

                    yaw_old = yaw

                    #write odometry data to .txt
                    with open(file_loc+"odometry_t_mus-x_m-y_m-yaw_deg-yr_degs-v_ms.txt", "a") as text_file:
                        text_file.write("%d %f %f %f %f %f\n" % \
                           (round(args.start_time+measurements.game_timestamp),
                            player_measurements.transform.location.x - shift_x,
                            player_measurements.transform.location.y - shift_y,
                            -yaw,
                            -yaw_rate,
                            player_measurements.forward_speed))

                    # need modified saver to save converted segmentation
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        if name == 'CameraSegmentation':
                            measurement.save_to_disk_converted(filename)
                        else:
                            measurement.save_to_disk(filename)

                    with open(file_loc_tracklets+str(round(args.start_time+measurements.game_timestamp))+".txt", "w") as text_file:
                        im = Image.new('L', (256*6, 256*6), (127))
                        shift = 256*6/2
                        draw = ImageDraw.Draw(im)
                        # create occupancy map and save participant info in tracklet txt file similar to KITTI
                        for agent in measurements.non_player_agents:
                            if agent.HasField('vehicle') or agent.HasField('pedestrian'):
                                participant = agent.vehicle if agent.HasField('vehicle') else agent.pedestrian
                                angle = cart2pol(participant.transform.orientation.x, participant.transform.orientation.y)
                                text_file.write("%d %f %f %f %f %f\n" % \
                                 (agent.id,
                                  participant.transform.location.x,
                                  participant.transform.location.y,
                                  angle,
                                  participant.bounding_box.extent.x,
                                  participant.bounding_box.extent.y))
                                polygon = agent2world(participant, angle)
                                polygon = world2player(polygon, math.radians(-yaw), player_measurements.transform)
                                polygon = player2image(polygon, shift, multiplier=25)
                                polygon = [tuple(row) for row in polygon.T]

                                draw.polygon(polygon, 0, 0)
                        im = im.resize((256,256), Image.ANTIALIAS)
                        im = im.rotate(imrotate)
                        im.save(file_loc_grid + 'gridmap_'+ str(round(args.start_time+measurements.game_timestamp)) +'_occupancy' + '.png')

                else:
                    # get first values
                    yaw_shift = measurements.player_measurements.transform.rotation.yaw
                    yaw_old = ((yaw_shift - 180) % 360) - 180
                    imrotate = round(yaw_old)+90
                    shift_x = measurements.player_measurements.transform.location.x
                    shift_y = measurements.player_measurements.transform.location.y
                    prev_time = np.int64(measurements.game_timestamp)


                if not args.autopilot:
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)
                else:
                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.01, 0.01)
                    client.send_control(control)
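
cart2pol is used above to turn a participant's orientation vector into an angle but is not defined in the snippet. A hypothetical one-liner consistent with that usage:

import math

def cart2pol(x, y):
    # Return the polar angle (radians) of the (x, y) orientation vector.
    return math.atan2(y, x)
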
Example #14
def run_carla_client(args):
    number_of_episodes = args.episodes
    frames_per_episode = args.frames
    
    CAMERA_RGB_WIDTH = args.camera_rgb_width
    CAMERA_RGB_HEIGHT = args.camera_rgb_height
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        
        episode_start_time = time.perf_counter()
        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=1,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The center camera captures RGB images of the scene.
                camera_center = Camera('RGBCenter')
                # Set image resolution in pixels.
                camera_center.set_image_size(CAMERA_RGB_WIDTH, CAMERA_RGB_HEIGHT)
                # Set its position relative to the car in meters.
                # TODO: Wish there was a better way to know how these values will actually translate in-game
                camera_center.set_position(0.30, 0, 1.30)
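                # The position is given in metres relative to the car; judging
                # from the left/right cameras below, X is forward, Y is to the
                # right, and Z is up.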
                settings.add_sensor(camera_center)

                # Left RGB camera
                camera_left = Camera('RGBLeft')
                camera_left.set_image_size(CAMERA_RGB_WIDTH, CAMERA_RGB_HEIGHT)
                camera_left.set_position(0.30, -0.75, 1.30)
                settings.add_sensor(camera_left)

                # Right RGB camera
                camera_right = Camera('RGBRight')
                camera_right.set_image_size(CAMERA_RGB_WIDTH, CAMERA_RGB_HEIGHT)
                camera_right.set_position(0.30, 0.75, 1.30)
                settings.add_sensor(camera_right)
                
                # Optional depth camera
                if args.depth:
                    camera1 = Camera('CameraDepth', PostProcessing='Depth')
                    camera1.set_image_size(CAMERA_RGB_WIDTH, CAMERA_RGB_HEIGHT)
                    camera1.set_position(0.30, 0, 1.30)
                    settings.add_sensor(camera1)

                # Optionally, add a LIDAR sensor
                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            
            # Initialize label dict in the episode.
            episode_label = {}

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Get autopilot control
                autopilot_control = measurements.player_measurements.autopilot_control
                # control.steer += random.uniform(-0.1, 0.1)

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)

                        # Save label
                        label_key = 'episode_{:0>4d}/{:s}/{:0>6d}'.format(episode, name, frame)
                        control_dict = generate_control_dict(autopilot_control)
                        if name == 'RGBCenter':
                            # No modifications to the autopilot control; store
                            # a copy so the augmented steer below does not
                            # overwrite this unaugmented label.
                            episode_label[label_key] = dict(control_dict)

                            img = Image.open(filename + '.png')
                            translated_img, translated_steering_angle = translate_img(img, control_dict['steer'], 100, 0)
                            control_dict['steer'] = translated_steering_angle

                            augmented_filename = args.out_filename_format.format(episode, name, frame) + '_augmented.png'
                            augmented_label_key = label_key + '_augmented'
                            translated_img.save(augmented_filename)
                            episode_label[augmented_label_key] = control_dict

                        elif name == 'RGBRight':
                            # Corrective steer label for the right camera;
                            # store a copy before the augmentation overwrites it.
                            control_dict['steer'] = -0.25
                            episode_label[label_key] = dict(control_dict)

                            img = Image.open(filename + '.png')
                            translated_img, translated_steering_angle = translate_img(img, control_dict['steer'], 100, 0)
                            control_dict['steer'] = translated_steering_angle

                            augmented_filename = args.out_filename_format.format(episode, name, frame) + '_augmented.png'
                            augmented_label_key = label_key + '_augmented'
                            translated_img.save(augmented_filename)
                            episode_label[augmented_label_key] = control_dict

                        elif name == 'RGBLeft':
                            # Corrective steer label for the left camera;
                            # store a copy before the augmentation overwrites it.
                            control_dict['steer'] = 0.25
                            episode_label[label_key] = dict(control_dict)

                            img = Image.open(filename + '.png')
                            translated_img, translated_steering_angle = translate_img(img, control_dict['steer'], 100, 0)
                            control_dict['steer'] = translated_steering_angle

                            augmented_filename = args.out_filename_format.format(episode, name, frame) + '_augmented.png'
                            augmented_label_key = label_key + '_augmented'
                            translated_img.save(augmented_filename)
                            episode_label[augmented_label_key] = control_dict

                        else:
                            # Depth and LIDAR frames (when enabled above) carry
                            # no steering label; anything else is unexpected.
                            if name not in ('CameraDepth', 'Lidar32'):
                                raise Exception('Unknown Sensor: {}'.format(name))


                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #
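                # A small sketch for turning that normalized value into metres
                # (assuming the optional 'CameraDepth' sensor above is enabled
                # and that, as in CARLA 0.8.x, the depth far plane sits at 1 km):
                #
                #     if 'CameraDepth' in sensor_data:
                #         depth_array = sensor_data['CameraDepth'].data
                #         depth_in_meters = depth_array[Y, X] * 1000.0
                #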

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:
                    print('sending dummy control')
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    # control = measurements.player_measurements.autopilot_control
                    # TODO: Does random steering jitter add human-ness?
                    # control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(autopilot_control)

            # Save episode label dict.
            with open(args.out_labelname_format.format(episode), 'wb') as f:
                pickle.dump(episode_label, f, pickle.HIGHEST_PROTOCOL)
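
            # A sketch of reading the saved labels back later (assuming the
            # same out_labelname_format and key scheme as above):
            #
            #     with open(args.out_labelname_format.format(episode), 'rb') as f:
            #         episode_label = pickle.load(f)
            #     steer = episode_label['episode_0000/RGBCenter/000000']['steer']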

            print(f'episode elapsed time: {time.perf_counter() - episode_start_time:0.2f}')
Example #15
0
def run_carla_client(args):
	# Here we will run 1 episode with 1000 frames.
	number_of_episodes = 1
	frames_per_episode = 1000

	# We assume the CARLA server is already waiting for a client to connect at
	# host:port. To create a connection we can use the `make_carla_client`
	# context manager, it creates a CARLA client object and starts the
	# connection. It will throw an exception if something goes wrong. The
	# context manager makes sure the connection is always cleaned up on exit.
	with make_carla_client(args.host, args.port) as client:
		print('CarlaClient connected')

		for episode in range(0, number_of_episodes):
			# Start a new episode.

			if args.settings_filepath is None:

				# Create a CarlaSettings object. This object is a wrapper around
				# the CarlaSettings.ini file. Here we set the configuration we
				# want for the new episode.
				settings = CarlaSettings()
				settings.set(
					SynchronousMode=True,
					SendNonPlayerAgentsInfo=True,
					NumberOfVehicles=152,
					NumberOfPedestrians=500,
					WeatherId= args.weatherId,
					QualityLevel=args.quality_level)

				# Now we want to add a few cameras to the player vehicle.
				# We will collect the images produced by these cameras every
				# frame.

				# The default camera captures RGB images of the scene.
				camera0 = Camera('RGB')
				# Set image resolution in pixels.
				camera0.set_image_size(800, 600)
				# Set its position relative to the car in meters.
				camera0.set_position(1.80, 0, 1.30)
				settings.add_sensor(camera0)

				# Let's add another RGB camera in the back of the car
				camera0_b = Camera('RGB_back')
				camera0_b.set_image_size(800, 600)
				camera0_b.set_position(-1.70, 0, 1.30)
				camera0_b.set_rotation(0, 180, 0)
				settings.add_sensor(camera0_b)

				# Let's add another camera producing ground-truth depth.
				camera1 = Camera('Depth', PostProcessing='Depth')
				camera1.set_image_size(800, 600)
				camera1.set_position(1.80, 0, 1.30)
				settings.add_sensor(camera1)

				# Let's add another camera producing ground-truth depth for the back.
				camera1_b = Camera('Depth_back', PostProcessing='Depth')
				camera1_b.set_image_size(800, 600)
				camera1_b.set_position(-1.70, 0, 1.30)
				camera1_b.set_rotation(0, 180, 0)
				settings.add_sensor(camera1_b)

				# Let's add another camera producing ground-truth semantic segmentation.
				camera2 = Camera('SemanticSegmentation', PostProcessing='SemanticSegmentation')
				camera2.set_image_size(800, 600)
				camera2.set_position(1.80, 0, 1.30)
				settings.add_sensor(camera2)

				# Let's add another camera producing ground-truth semantic segmentation for the back.
				camera2_b = Camera('SemanticSegmentation_back', PostProcessing='SemanticSegmentation')
				camera2_b.set_image_size(800, 600) 
				camera2_b.set_position(-1.70, 0, 1.30)
				camera2_b.set_rotation(0, 180, 0)
				settings.add_sensor(camera2_b)

				if args.lidar:
					lidar = Lidar('Lidar32')
					lidar.set_position(0, 0, 2.50)
					lidar.set_rotation(0, 0, 0)
					lidar.set(
						Channels=32,
						Range=50,
						PointsPerSecond=100000,
						RotationFrequency=10,
						UpperFovLimit=10,
						LowerFovLimit=-30)
					settings.add_sensor(lidar)

			else:

				# Alternatively, we can load these settings from a file.
				with open(args.settings_filepath, 'r') as fp:
					settings = fp.read()

			# Now we load these settings into the server. The server replies
			# with a scene description containing the available start spots for
			# the player. Here we can provide a CarlaSettings object or a
			# CarlaSettings.ini file as string.
			scene = client.load_settings(settings)

			# Choose one player start.
			number_of_player_starts = len(scene.player_start_spots)
			player_start = args.playerStart
			print('player_start', player_start)  
			print('weather_Id', args.weatherId)          

			# Notify the server that we want to start the episode at the
			# player_start index. This function blocks until the server is ready
			# to start the episode.
			print('Starting new episode...')
			client.start_episode(player_start)

			# Iterate every frame in the episode.
			for frame in range(0, frames_per_episode):

				print('Frame : ', frame)

				# Read the data produced by the server this frame.
				measurements, sensor_data = client.read_data()

				# Save Trajectory
				save_trajectory(frame,measurements)

				# Save the images to disk if requested. We save 1 frame out of 10, from frame 30 on. 
				# In the first frames the car is 'flying' and the lighting is not correct.
				if args.save_images_to_disk and frame % 10 == 0 and frame > 29:
					for name, measurement in sensor_data.items():
						filename = args.out_filename_format.format(episode, name, frame)
						measurement.save_to_disk(filename)

				# Now we have to send the instructions to control the vehicle.
				# If we are in synchronous mode the server will pause the
				# simulation until we send this control.
				if not args.autopilot:
					client.send_control(
						steer=random.uniform(-1.0, 1.0),
						throttle=0.5,
						brake=0.0,
						hand_brake=False,
						reverse=False)
				else:
					# Together with the measurements, the server has sent the
					# control that the in-game autopilot would do this frame. We
					# can enable autopilot by sending back this control to the
					# server.
					control = measurements.player_measurements.autopilot_control
					save_control(frame,control)
					client.send_control(control)
Example #16
0
def run_carla_client(args):
    # Here we will run 5 episodes, each split into 40 cuts of 200 frames.
    number_of_episodes = 5
    cut_per_episode = 40
    frames_per_cut = 200

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        for episode in range(0, number_of_episodes):
            print("press Enter to continue...")
            start = input()
            # Each episode directory stores one set of training data; create it if it does not exist.
            pathdir = '/home/kadn/dataTrain/episode_{:0>3}'.format(episode)
            mkdir(pathdir)

            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=20,
                NumberOfPedestrians=40,
                # WeatherId=random.choice([1, 3, 7, 8, 14]),
                WeatherId=1,
                QualityLevel=args.quality_level)
            settings.randomize_seeds()
            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(88, 200)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            camera1 = Camera('CameraDepth', PostProcessing='Depth')
            camera1.set_image_size(200, 88)
            camera1.set_position(0.30, 0, 1.30)
            settings.add_sensor(camera1)

            camera2 = Camera('CameraSemSeg',
                             PostProcessing='SemanticSegmentation')
            camera2.set_image_size(88, 200)
            camera2.set_position(2.0, 0.0, 1.4)
            camera2.set_rotation(0.0, 0.0, 0.0)
            settings.add_sensor(camera2)
            # if args.lidar:
            lidar = Lidar('Lidar32')
            lidar.set_position(0, 0, 2.50)
            lidar.set_rotation(0, 0, 0)
            lidar.set(Channels=0,
                      Range=30,
                      PointsPerSecond=200000,
                      RotationFrequency=10,
                      UpperFovLimit=0,
                      LowerFovLimit=0)
            settings.add_sensor(lidar)

            # else:
            #
            #     # Alternatively, we can load these settings from a file.
            #     with open(args.settings_filepath, 'r') as fp:
            #         settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))
            player_start = 1

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            # Start a new episode.
            client.start_episode(player_start)

            for cut in range(0, cut_per_episode):
                num = fileCounter(pathdir)
                filename = "data_{:0>6}.h5".format(num)
                filepath = pathdir + '/' + filename
                f = h5py.File(filepath, "w")
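                # Each cut file holds frames_per_cut (= 200) frames: camera and
                # segmentation images are stacked along the last axis of their
                # datasets, the lidar grid likewise, plus one 28-value target
                # vector per frame.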
                rgb_file = f.create_dataset("rgb", (200, 88, 200), np.uint8)
                seg_file = f.create_dataset("seg", (200, 88, 200), np.uint8)
                lidar_file = f.create_dataset('lidar', (200, 200, 200),
                                              np.uint8)
                startendpoint = f.create_dataset('startend', (1, 2),
                                                 np.float32)
                targets_file = f.create_dataset("targets", (200, 28),
                                                np.float32)
                index_file = 0

                # Iterate every frame in the episode.
                for frame in range(0, frames_per_cut):
                    # Read the data produced by the server this frame.
                    measurements, sensor_data = client.read_data()

                    # get data and store
                    sensors, targets_data = record_train_data(
                        measurements, sensor_data)
                    rgb_file[:, :, index_file] = sensors['rgb']
                    seg_file[:, :, index_file] = sensors['seg']
                    lidar_file[:, :, index_file] = sensors['lidar']
                    targets_file[index_file, :] = targets_data
                    index_file += 1

                    # We can access the encoded data of a given image as numpy
                    # array using its "data" property. For instance, to get the
                    # depth value (normalized) at pixel X, Y
                    #
                    #     depth_array = sensor_data['CameraDepth'].data
                    #     value_at_pixel = depth_array[Y, X]
                    #

                    # Now we have to send the instructions to control the vehicle.
                    # If we are in synchronous mode the server will pause the
                    # simulation until we send this control.

                    if args.autopilot:

                        client.send_control(steer=0,
                                            throttle=0.8,
                                            brake=0.0,
                                            hand_brake=False,
                                            reverse=False)

                    else:

                        # Together with the measurements, the server has sent the
                        # control that the in-game autopilot would do this frame. We
                        # can enable autopilot by sending back this control to the
                        # server. We can modify it if wanted, here for instance we
                        # will add some noise to the steer.

                        control = measurements.player_measurements.autopilot_control
                        control.steer += random.uniform(-0.05, 0.05)
                        client.send_control(control)
Example #17
0
def run_carla_client(args):
    if not args.autopilot:
        lane_detection = Popen(
            [
                "/home/minghanz/lane-detection/newcheck/SFMotor/lane detection/lane-detection"
            ],
            cwd="/home/minghanz/lane-detection/newcheck/SFMotor/lane detection"
        )

    # Here we will run a single episode with 10000 frames.
    # number_of_episodes = 3
    frames_per_episode = 10000

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        # for episode in range(0, number_of_episodes):
        # Start a new episode.

        if args.settings_filepath is None:

            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=20,  # 20
                NumberOfPedestrians=40,  # 40
                WeatherId=1,  # random.choice([1, 3, 7, 8, 14]),
                QualityLevel=args.quality_level)
            settings.randomize_seeds()

            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = Camera('CameraRGB')
            # Set image resolution in pixels.
            camera0.set_image_size(800, 600)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, -3.0, 2.30)
            settings.add_sensor(camera0)

            # # Let's add another camera producing ground-truth depth.
            # camera1 = Camera('CameraDepth', PostProcessing='Depth')
            # camera1.set_image_size(800, 600)
            # camera1.set_position(0.30, 0, 1.30)
            # settings.add_sensor(camera1)

            if args.lidar:
                lidar = Lidar('Lidar32')
                lidar.set_position(0, 0, 2.50)
                lidar.set_rotation(0, 0, 0)
                lidar.set(Channels=32,
                          Range=50,
                          PointsPerSecond=100000,
                          RotationFrequency=10,
                          UpperFovLimit=10,
                          LowerFovLimit=-30)
                settings.add_sensor(lidar)

        else:

            # Alternatively, we can load these settings from a file.
            with open(args.settings_filepath, 'r') as fp:
                settings = fp.read()

        # Now we load these settings into the server. The server replies
        # with a scene description containing the available start spots for
        # the player. Here we can provide a CarlaSettings object or a
        # CarlaSettings.ini file as string.
        scene = client.load_settings(settings)

        # Choose one player start at random.
        # number_of_player_starts = len(scene.player_start_spots)
        # player_start = random.randint(0, max(0, number_of_player_starts - 1))
        player_start = 1

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print('Starting new episode...')
        client.start_episode(player_start)
        px_l = -1
        py_l = -1
        # Iterate every frame in the episode.
        for frame in range(0, frames_per_episode):

            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Print some of the measurements.
            #print_measurements(measurements)

            # Save the images to disk if requested.
            if args.save_images_to_disk:
                for name, measurement in sensor_data.items():
                    filename = args.out_filename_format.format(
                        0, name, frame)  # episode
                    measurement.save_to_disk(filename)

            # We can access the encoded data of a given image as numpy
            # array using its "data" property. For instance, to get the
            # depth value (normalized) at pixel X, Y
            #
            #     depth_array = sensor_data['CameraDepth'].data
            #     value_at_pixel = depth_array[Y, X]
            #

            # Now we have to send the instructions to control the vehicle.
            # If we are in synchronous mode the server will pause the
            # simulation until we send this control.

            if not args.autopilot:
                image = sensor_data['CameraRGB'].data
                lane_coef = send_image(image)
                print("lane_coef: ", lane_coef)

            if not args.autopilot:
                player_measurements = measurements.player_measurements
                pos_x = player_measurements.transform.location.x
                pos_y = player_measurements.transform.location.y
                yaw = player_measurements.transform.rotation.yaw
                speed = player_measurements.forward_speed * 3.6

                #Traffic Light
                # print('TrafficLight:-----------------')
                # for agent in measurements.non_player_agents:
                #     if agent.HasField('traffic_light'):
                #         print(agent.id)
                #         print(agent.traffic_light.transform)
                #         print(agent.traffic_light.state)
                #         print('-----------------')
                #         break

                #Traffic Light End

                if px_l == -1:
                    px_l = pos_x
                if py_l == -1:
                    py_l = pos_y
                #delta_x = pos_x-px_l
                #delta_y = pos_y-py_l
                st = 0

                # Simple proportional cruise control around a 28 km/h target:
                # brake in proportion to the overshoot, otherwise open the
                # throttle in proportion to the deficit plus a 0.6 feed-forward.
                if speed > 28:
                    br = (speed - 28) * 0.1
                    thr = 0
                else:
                    br = 0
                    thr = (28 - speed) * 0.05 + 0.6

                # Use the LIDAR return to decide whether the lane ahead is
                # clear: keep returns with z < 2.1 in the sensor frame that lie
                # more than 2 m ahead (forward is -y here); if none remain, the
                # car can come back towards the lane centre.
                L = sensor_data["Lidar32"].point_cloud.array
                L = L[L[:, 2] < 2.1]
                L = L[L[:, 1] < -2]
                if len(L) == 0:
                    come_back = True
                else:
                    come_back = False

                # theta1 = math.atan2((lane_coef[1]+lane_coef[4])/2,1)
                # print(theta1)
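                # Estimate the heading relative to the lane from the detected
                # lane-line slopes (the 2.5 factor is an empirical scale) and
                # print it against the ground-truth yaw for comparison.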
                k1 = (lane_coef[1] + lane_coef[4]) / 2
                print("lane_k:", k1)
                # print("yaw:",yaw)
                k = math.tan(-yaw / 180 * math.pi)
                print("real_k:", k)
                print("ratio k:", k / k1)
                k_obs = k1 * 2.5
                theta = math.atan(k_obs)
                print("theta_obs: ", theta)
                print("theta_tru: ", -yaw / 180 * math.pi)
                print("theta_dif: ", theta + yaw / 180 * math.pi)
                # theta = -yaw/180*math.pi
                #theta = math.atan2(-0.5*(lane_coef[1]+lane_coef[4])/2,1)
                #print("diff:",(theta-theta1)*100)
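                # Expected forward travel in one 0.1 s frame (speed is in km/h).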
                delta_x = speed * math.cos(theta) * 0.1 / 3.6
                if not come_back:
                    X = L[:, 0]
                    Y = -L[:, 1]
                    XX = math.cos(theta) * X - math.sin(theta) * Y
                    YY = math.sin(theta) * X + math.cos(theta) * Y
                    plt.plot(XX, YY, 'ro')
                    if min(YY) > delta_x * 30:
                        come_back = True
                #else:
                #theta = 0
                #plt.plot([106-pos_y,106-pos_y],[-90-pos_x,100-pos_x])
                #plt.plot([109.5-pos_y,109.5-pos_y],[-90-pos_x,100-pos_x])

                # plot lanes detected
                offset = 2.2
                if lane_coef[0] == 0:
                    expand = 0
                else:
                    expand = 0.4
                plt.plot([
                    lane_coef[0] - offset - expand,
                    lane_coef[0] - offset - expand
                ], [-90 - pos_x, 200 - pos_x])
                plt.plot([lane_coef[3] - offset, lane_coef[3] - offset],
                         [-90 - pos_x, 200 - pos_x])
                plt.plot([
                    lane_coef[3] + lane_coef[3] - lane_coef[0] - offset +
                    expand, lane_coef[3] + lane_coef[3] - lane_coef[0] -
                    offset + expand
                ], [-90 - pos_x, 200 - pos_x])

                # plt.plot([104-pos_y,104-pos_y],[-90-pos_x,200-pos_x])
                # plt.plot([108-pos_y,108-pos_y],[-90-pos_x,200-pos_x])
                # plt.plot([112-pos_y,112-pos_y],[-90-pos_x,200-pos_x])

                plt.plot([0], [0], '*')
                plt.axis([-5, 5, -10, 50])
                print("tx:", 109.5 - pos_y)

                if come_back:
                    tx = lane_coef[3] - 0.8  #109.5-pos_y

                    ty = delta_x * 30
                else:
                    tx = min(XX) - 3
                    ty = delta_x * 30
                    if ty > min(YY) and tx > 1 and np.sqrt(tx * tx +
                                                           ty * ty) > 5:
                        ty = min(YY) - 1

                # Steer proportionally towards the target point (tx, ty),
                # compensating for the estimated heading; when nearly stopped
                # (below ~2 km/h) keep the wheel straight.
                if speed < 2:
                    st = 0
                else:
                    st = -(math.atan2(-tx, ty) - theta) * 0.8

                plt.plot([tx], [ty], 'r*')

                if pos_x > 70:
                    br = 1
                    thr = 0
                    st = 0

                # Refresh the live plot every frame.
                plt.show(block=False)
                plt.pause(0.01)
                plt.clf()

                if abs(st) < 0.001:
                    st = 0
                    #st = (2-pos_y)*0.01
                print('Steering:', st)
                client.send_control(
                    #steer=random.uniform(-1.0, 1.0),
                    steer=st,
                    throttle=thr,
                    brake=br,
                    hand_brake=False,
                    reverse=False)
                px_l = pos_x
                py_l = pos_y
            else:

                # Together with the measurements, the server has sent the
                # control that the in-game autopilot would do this frame. We
                # can enable autopilot by sending back this control to the
                # server. We can modify it if wanted, here for instance we
                # will add some noise to the steer.
                control = measurements.player_measurements.autopilot_control
                control.steer += random.uniform(-0.1, 0.1)
                client.send_control(control)

    if not args.autopilot:
        lane_detection.kill()
Example #18
0
def run_carla_client(args):
    # Here we will run 3 episodes with 300 frames each.
    number_of_episodes = 3
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(800, 600)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
Example #19
0
def run_carla_client(args):
    # Here we will run 1 episode with 2001 frames.
    number_of_episodes = 1
    frames_per_episode = 2001
    # originally 1000

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        for episode in range(0, number_of_episodes):
            # Start a new episode.
            if args.settings_filepath is None:
                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(SynchronousMode=True,
                             SendNonPlayerAgentsInfo=True,
                             NumberOfVehicles=args.NumberOfVehicles,
                             NumberOfPedestrians=args.NumberOfPedestrians,
                             WeatherId=args.weatherId,
                             QualityLevel=args.quality_level)

                # Now we want to add a few cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('RGB')
                # Set image resolution in pixels.
                camera0.set_image_size(args.image_width, args.image_width)
                # Set its position relative to the car in meters.
                camera0.set_position(1.80, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('Depth', PostProcessing='Depth')
                camera1.set_image_size(args.image_width, args.image_width)
                camera1.set_position(1.80, 0, 1.30)
                settings.add_sensor(camera1)

                # Let's add another camera producing ground-truth semantic segmentation.
                camera2 = Camera('SemanticSegmentation',
                                 PostProcessing='SemanticSegmentation')
                camera2.set_image_size(args.image_width, args.image_width)
                camera2.set_position(1.80, 0, 1.30)
                settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = args.playerStart

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                print('Frame : ', frame)
                save_bool = True

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Save Trajectory
                save_trajectory(frame, measurements)

                # Read the reference trajectory and check that this run follows it #bbescos
                file = open(args.trajectoryFile, 'r')
                lines = file.readlines()
                line = lines[frame]
                words = line.split()
                pos_x_s = float(words[1])
                if abs(pos_x_s -
                       measurements.player_measurements.transform.location.x
                       ) > 0.1 * 5:
                    save_bool = False
                    print(
                        pos_x_s,
                        measurements.player_measurements.transform.location.x)
                pos_y_s = float(words[2])
                if abs(pos_y_s -
                       measurements.player_measurements.transform.location.y
                       ) > 0.1 * 5:
                    save_bool = False
                    print(
                        pos_y_s,
                        measurements.player_measurements.transform.location.y)
                file.close()

                # Save the images to disk if requested.
                if args.save_images_to_disk and frame % 10 == 0 and frame > 29 and save_bool:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode, name, frame)
                        measurement.save_to_disk(filename)

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.
                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)
                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server.
                    control = measurements.player_measurements.autopilot_control
                    # Read control file
                    file = open(args.controlFile, 'r')
                    lines = file.readlines()
                    line = lines[frame]
                    words = line.split()
                    steer = float(words[1])
                    throttle = float(words[2])
                    brake = float(words[3])
                    hand_brake = (words[4] == 'True')
                    reverse = (words[5] == 'True')
                    ##
                    # steer = round(steer,10)
                    # throttle = round(throttle, 10)
                    # brake = round(brake, 10)
                    ##
                    file.close()
                    control.steer = steer
                    control.throttle = throttle
                    control.brake = brake
                    control.hand_brake = hand_brake
                    control.reverse = reverse
                    save_control(frame, control)
                    client.send_control(control)
Example #20
0
def run_carla_client(args):

    # Settings
    # Town 1
    ## Natural turns: [42,67,69,79,94,97,70,44,85,102], [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
    ## T-intersection: [88,90,107,133,136]
    ## Side intersection left: [ 66, 16, 25 ]
    ## Side intersection right: [ 138, 19, 26 ]
    ## Straight: [14, 149, 78, 71, 89]

    # Town 2
    ## Natural turns right: [65, 78, 44, 1]
    ## Natural turns left: [49, 50, 57, 70]
    ## T-intersection [2, 5, 10, 11, 19, 26, 34, 37]
    ## Side intersection left: [7, 23, 16, 39, 6, 43, 79]
    ## Side intersection right: [40, 20, 28, 32, 34, 46, 71, 74]
    ## Straight: [61, 64, 77, 51]

    positions = args.positions or [65, 78, 49, 50, 2, 5, 10, 7, 23, 40, 20, 61]
    levels_of_randomness = args.randomness or [0.0, 0.2, 0.4, 0.6]
    frames_per_level = args.frames or [
        200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200
    ]

    print("Positions: ", positions)
    print("Levels of randomness: ", levels_of_randomness)
    print("Frames per level: ", frames_per_level)

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        directions = ["_left", "_right"
                      ] if args.force_left_and_right else ["_"]
        for direction in directions:
            print("Direction ", direction)
            # Create a CarlaSettings object. This object is a wrapper around
            # the CarlaSettings.ini file. Here we set the configuration we
            # want for the new episode.
            settings = CarlaSettings()
            direction_var = 6 if direction == "_right" else 0
            settings.set(SynchronousMode=True,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=0,
                         NumberOfPedestrians=0,
                         WeatherId=random.choice([2]),
                         QualityLevel=args.quality_level,
                         SeedVehicles=direction_var)
            #settings.randomize_seeds()

            if args.capture:
                # To create visualisation of the current run
                camera3 = Camera('CameraRGB')
                camera3.set_image_size(512, 256)
                camera3.set_position(-8, 0, 5)
                camera3.set(FOV=args.fov)
                camera3.set_rotation(pitch=-20, yaw=0, roll=0)
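                # (i.e. a chase camera 8 m behind and 5 m above the car,
                # pitched down by 20 degrees)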
                settings.add_sensor(camera3)
                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.
                if args.point_cloud:
                    # Camera to produce point Cloud
                    camera1 = Camera('CameraDepth', PostProcessing='Depth')
                    camera1.set_image_size(256, 256)
                    camera1.set_position(2.2, 0, 1.30)
                    camera1.set(FOV=args.fov)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraSeg',
                                     PostProcessing='SemanticSegmentation')
                    camera2.set_image_size(256, 256)
                    camera2.set_position(2.2, 0, 1.30)
                    camera2.set(FOV=args.fov)
                    settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.

            for episode, position in enumerate(positions):
                for randomness, frames in zip(levels_of_randomness,
                                              frames_per_level):
                    crashed = True
                    while crashed:
                        crashed = False
                        # Start a new episode.
                        print('Starting new episode at %r...' % scene.map_name)
                        print('Episode {}, Position {}, Randomness {}'.format(
                            episode, position, randomness))
                        client.start_episode(position)

                        if args.capture:
                            # Make sure directories exist
                            directory = '_out/pos{}{}/randomness_{}'.format(
                                position, direction, randomness)
                            ply_dir = '{}/point_clouds'.format(directory)
                            ply_dir_full = '{}/point_clouds_full'.format(
                                directory)
                            lidar_dir = '{}/lidar'.format(directory)
                            img_dir = '{}/images'.format(directory)
                            if not os.path.exists(img_dir):
                                os.makedirs(img_dir)
                            if args.point_cloud and not os.path.exists(
                                    ply_dir):
                                os.makedirs(ply_dir)
                            if args.point_cloud and not os.path.exists(
                                    ply_dir_full):
                                os.makedirs(ply_dir_full)
                            if args.lidar and not os.path.exists(lidar_dir):
                                os.makedirs(lidar_dir)
                            # Write steering data to csv file
                            if args.point_cloud:
                                csv = open(
                                    "{}/driving_data.csv".format(ply_dir), "w")
                            elif args.lidar:
                                csv = open(
                                    "{}/driving_data.csv".format(lidar_dir),
                                    "w")
                            csv.write(",name,speed,throttle,steer\n")
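                            # Only the steer column is filled in per frame
                            # below; the speed and throttle columns are written
                            # as zeros.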

                        # Iterate every frame in the episode
                        for frame in tqdm(range(frames)):
                            # Read the data produced by the server this frame.
                            measurements, sensor_data = client.read_data()

                            if args.capture:
                                if args.point_cloud:
                                    # Save processed point clouds and autopilot steering to disk if requested
                                    # Get depth and seg as numpy array for further processing
                                    depth_obj = sensor_data['CameraDepth']
                                    depth = depth_obj.data
                                    fov = depth_obj.fov

                                    # Filter seg to get intersection points
                                    seg = sensor_data['CameraSeg'].data
                                    filtered_seg = pp.filter_seg_image(seg)

                                    if args.full_point_cloud:
                                        # Converting depth image to grayscale
                                        point_cloud_full = pp.depth_to_local_point_cloud(
                                            depth,
                                            fov,
                                            seg,
                                            max_depth=0.05,
                                            full=True)
                                        filename_full = "point_cloud_full_{:0>5d}".format(
                                            frame)
                                        pp.save_to_disk(
                                            point_cloud_full,
                                            "{}/{}.ply".format(
                                                ply_dir_full, filename_full))

                                    # Create pointcloud from seg and depth (but only take intersection points)
                                    point_cloud = pp.depth_to_local_point_cloud(
                                        depth,
                                        fov,
                                        filtered_seg,
                                        max_depth=0.05)
                                    filename = "point_cloud_{:0>5d}".format(
                                        frame)
                                    pp.save_to_disk(
                                        point_cloud,
                                        "{}/{}.ply".format(ply_dir, filename))

                                if args.lidar:
                                    sensor_data['Lidar32'].save_to_disk(
                                        '{}/point_cloud_{:0>5d}'.format(
                                            lidar_dir, frame))

                                # Save steering data of this frame
                                control = measurements.player_measurements.autopilot_control
                                csv.write("0,image_{:0>5d},0,0,{}\n".format(
                                    frame, control.steer))

                                # Save rgb image to visualize later
                                sensor_data['CameraRGB'].save_to_disk(
                                    '{}/image_{:0>5d}.png'.format(
                                        img_dir, frame))

                            # Now we have to send the instructions to control the vehicle.
                            # If we are in synchronous mode the server will pause the
                            # simulation until we send this control.

                            control = measurements.player_measurements.autopilot_control
                            speed = measurements.player_measurements.forward_speed
                            old_steer = control.steer
                            control.steer += random.uniform(
                                -randomness, randomness)

                            if args.ignore_red_lights:
                                control.throttle = 0.5
                                control.brake = 0.0
                                control.hand_brake = False
                                control.reverse = False

                            client.send_control(control)
                            crashed = False
                            # Heuristic: nearly stopped while steering hard -> assume a crash.
                            if speed < 1 and abs(old_steer) > 0.5:
                                print(
                                    "\nSeems like we crashed.\nTrying again..."
                                )
                                crashed = True
                                break
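The driving_data.csv written above has the header ",name,speed,throttle,steer"; each row stores a frame name and the autopilot steering angle, while the speed and throttle columns are left at zero. A minimal sketch of pairing the saved point clouds with their steering labels is shown below; pandas and the helper name are my own assumptions for illustration, and the directory layout mirrors the paths used in the snippet.

import os
import pandas as pd

def load_steering_labels(ply_dir):
    """Pair each saved point cloud with the autopilot steering of its frame."""
    # Columns: unnamed index, name, speed, throttle, steer (speed/throttle are 0 here).
    df = pd.read_csv(os.path.join(ply_dir, "driving_data.csv"))
    samples = []
    for _, row in df.iterrows():
        # The frame id is the zero-padded suffix of the name, e.g. "image_00042".
        frame_id = row["name"].split("_")[-1]
        ply_path = os.path.join(ply_dir, "point_cloud_{}.ply".format(frame_id))
        samples.append((ply_path, float(row["steer"])))
    return samples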
Example #21
0
def run_carla_client(args):
    # Run a (practically unbounded) number of episodes, 300 frames each.
    number_of_episodes = 500000
    frames_per_episode = 300




    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        #########################
        k = 1  # flag so the DQN is only constructed on the first episode
        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,
                    NumberOfPedestrians=0,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    WeatherId=1,
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(210, 160)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(210, 160)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # (i.e. where to start the episode)
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            #client.start_episode(player_start)
            client.start_episode(0)
            #--------------------------------------------------------------------

            # Do an initial read of one frame; its RGB image is used below to size
            # the DQN input (the per-frame loop starts further down).
            measurements, sensor_data = client.read_data()

            # Flatten the first frame's RGB image into the DQN feature vector.
            image_RGB = to_rgb_array(sensor_data['CameraRGB'])
            image_RGB_real=image_RGB.flatten()
            print(image_RGB_real)
            print(image_RGB_real.shape[0])
            ###############################################################################################
            if k==1:
                player_measurements = measurements.player_measurements
                loc = np.array([player_measurements.transform.location.x, player_measurements.transform.location.y])
                RL = DeepQNetwork(n_actions=8,
                                  n_features=image_RGB_real.shape[0],
                                  learning_rate=0.01, e_greedy=0.9,
                                  replace_target_iter=100, memory_size=2000,
                                  e_greedy_increment=0.001, )

                total_steps = 0
            else:
                total_steps = 0
                print("rl运行完毕")
            k=2
            for frame in range(0, frames_per_episode):
                measurements, sensor_data = client.read_data()
                player_measurements = measurements.player_measurements
                other_lane = 100 * player_measurements.intersection_otherlane
                offroad = 100 * player_measurements.intersection_offroad
                # loc1=np.array([player_measurements.transform.location.x, player_measurements.transform.location.y])
                # print(offroad)
                # print(other_lane)
                # print(loc1)
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                image_RGB_real = image_RGB.flatten()
                print_measurements(measurements)
                observation = image_RGB_real
                done = False
                # while True:
                #     # env.render()

                action = RL.choose_action(observation)

                if not args.autopilot:
                    brake1 = 0.0
                    steer1 = 0.0
                    # Map the discrete action index to a control value:
                    # indices above 4 are brake levels, the rest are steer levels.
                    if action > 4:
                        brake1 = actions[action]
                    else:
                        steer1 = actions[action]

                    client.send_control(
                        # steer=random.uniform(-1.0, 1.0),

                        steer=steer1,
                        throttle=0.6,
                        brake=brake1,
                        hand_brake=False,
                        reverse=False)


                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)

                # loc2=np.array([player_measurements.transform.location.x, player_measurements.transform.location.y])
                # Note: this re-reads the current frame's image, so observation_ equals
                # this step's observation; the next frame is only read at the top of
                # the next loop iteration.
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                image_RGB_rea2 = image_RGB.flatten()
                observation_ = image_RGB_rea2
                # Reward: penalize lane crossing and going off-road, plus the Euclidean
                # distance from the fixed reference point (271.0, 129.5).
                reward = (-other_lane - offroad
                          + np.sqrt(np.square(player_measurements.transform.location.x - 271.0)
                                    + np.square(player_measurements.transform.location.y - 129.5)))
                print(offroad)

                if offroad > 20 or other_lane > 10:
                    print("Too far off-road or across the lane - ending this episode")
                    done = True

                    # the smaller theta and closer to center the better
                    # x, x_dot, theta, theta_dot = observation_
                    # r1 = (env.x_threshold - abs(x))/env.x_threshold - 0.8
                    # r2 = (env.theta_threshold_radians - abs(theta))/env.theta_threshold_radians - 0.5
                    # reward = r1 + r2

                RL.store_transition(observation, action, reward, observation_)

                ep_r += reward
                if total_steps > 100:
                    RL.learn(do_train=1)

                if done:
                    print('episode:', episode,
                          'ep_r:', round(ep_r, 2),
                          'epsilon:', round(RL.epsilon, 2))

                    # client.start_episode(0)
                    break

                observation = observation_
                total_steps += 1

              #  RL.plot_cost()



                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)
            continue
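The excerpt above references an actions lookup table that is not shown in this snippet. Given the convention used there (8 discrete actions, with indices above 4 treated as brake levels and the remaining indices as steer levels), a hypothetical table consistent with that code might look like this:

# Hypothetical discrete action table (not part of the original snippet):
# indices 0-4 map to steering values, indices 5-7 to brake values.
actions = [-0.6, -0.3, 0.0, 0.3, 0.6,   # steer: hard left ... hard right
           0.3, 0.6, 1.0]               # brake: light ... full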
Example #22
0
def run_carla_client(args, classifier, plt, plt_index):
    global global_steering_indicator
    # Run args.frames frames per episode: one episode when a start position is given, otherwise three.
    frames_per_episode = args.frames
    
    if args.position:
      number_of_episodes = 1
    else:
      number_of_episodes = 3

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,
                    NumberOfPedestrians=0,
                    WeatherId=random.choice([2]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # # The default camera captures RGB images of the scene.
                # camera0 = Camera('CameraRGB')
                # # Set image resolution in pixels.
                # camera0.set_image_size(256, 256)
                # # Set its position relative to the car in meters.
                # camera0.set_position(2.2, 0, 1.30)
                # settings.add_sensor(camera0)

                if args.lidar:
                    # Adding a lidar sensor
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=args.lidar_pps,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)
                else:
                    # Let's add another camera producing ground-truth depth.
                    camera1 = Camera('CameraDepth', PostProcessing='Depth')
                    camera1.set_image_size(256, 256)
                    camera1.set_position(2.2, 0, 1.30)
                    camera1.set(FOV=90.0)
                    #camera1.set_rotation(pitch=-8, yaw=0, roll=0)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraSeg', PostProcessing='SemanticSegmentation')
                    camera2.set_image_size(256, 256)
                    camera2.set_position(2.2, 0, 1.30)
                    camera2.set(FOV=90.0)
                    #camera2.set_rotation(pitch=-8, yaw=0, roll=0)
                    settings.add_sensor(camera2)

                if args.capture:
                  camera3 = Camera('CameraRGB')
                  camera3.set_image_size(512, 256)
                  camera3.set_position(-8, 0, 5)
                  camera3.set(FOV=90.0)
                  camera3.set_rotation(pitch=-20, yaw=0, roll=0)
                  settings.add_sensor(camera3)


            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            if args.position:
              player_start = args.position
            else:
              player_start = random.choice([42,67,69,79,94,97, 70, 44,85,102])
            

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)

            """ Begin added code """
            if args.capture:
              directory = '_capture/pos{}'.format(player_start)
              if not os.path.exists(directory):
                  os.makedirs(directory)
              print("Capturing point clouds to {}".format(directory))


            """ End added code """

            client.start_episode(player_start)

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                """ Begin added code """
                # dict of steering directions of available curves
                # [1,0] if the next curve will be a left one
                # [0,1] if the next curve will be a right one
                directions = {
                  67: [[1,0]], # straight
                  42: [[1,0]], # straight or left
                  69: [[0,1]], # right
                  79: [[0,1]], # right
                  94: [[0,1]], # right
                  97: [[0,1]], # right
                  70: [[1,0]], # left
                  44: [[1,0]], # left
                  85: [[1,0]], # left
                  102: [[1,0]] # left
                }

                #dynamically set the global steering indicator during the game
                if(args.key_control):
                  set_steering_indicator_from_keypress()

                steering_direction = args.steering_indicator or global_steering_indicator

                if args.use_steering_indicator:
                  if steering_direction == "left":
                    steering_indicator = [[1,0]]
                  elif steering_direction == "right":
                    steering_indicator = [[0,1]]
                  else:
                    steering_indicator = [[0,0]]
                  steering_indicator = torch.Tensor(steering_indicator)
                  if cuda_available:
                    steering_indicator = steering_indicator.cuda()

                if args.lidar:
                    point_cloud = sensor_data['Lidar32'].data
                    if args.lidar_fov == 180:
                      print("FOV 180")
                      print("Before", point_cloud.shape)
                      point_cloud = filter_fov(point_cloud)
                      print("After", point_cloud.shape)


                else:
                    # Get depth and seg as numpy array for further processing
                    depth_obj = sensor_data['CameraDepth']
                    depth = depth_obj.data
                    fov = depth_obj.fov
                    
                    # Filter seg to get intersection points
                    seg = sensor_data['CameraSeg'].data
                    filtered_seg = pp.filter_seg_image(seg)

                    # Add following lines to measure performance of generating pointcloud
                    # def f():
                    #   return pp.depth_to_local_point_cloud(depth, fov, filtered_seg)
                    # print(timeit.timeit(f, number=1000) / 1000)

                    # Create pointcloud from seg and depth (but only take intersection points)
                    point_cloud = pp.depth_to_local_point_cloud(depth, fov, filtered_seg)

                # Save point cloud for later visualization
                if args.capture:
                  pp.save_to_disk(point_cloud, "{}/point_clouds/point_cloud_{:0>5d}.ply".format(directory, frame))
                  sensor_data['CameraRGB'].save_to_disk('{}/images/image_{:0>5d}.png'.format(directory, frame))
                

                # Normalizing the point cloud
                if not args.no_center or not args.no_norm:
                  point_cloud = point_cloud - np.expand_dims(np.mean(point_cloud, axis=0), 0)  # center
                
                if not args.no_norm:
                  dist = np.max(np.sqrt(np.sum(point_cloud ** 2, axis=1)), 0)
                  point_cloud = point_cloud / dist  # scale

                #pp.save_point_cloud(point_cloud, 'test')
                # pp.save_seg_image(filtered_seg)
                """ End added code """
                # Print some of the measurements.
                #print_measurements(measurements)

                # # Save the images to disk if requested.
                # if args.save_images_to_disk:
                #     for name, measurement in sensor_data.items():
                #         filename = args.out_filename_format.format(episode, name, frame)
                #         measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.
                
                if not args.autopilot:
                    client.send_control(
                        steer=random.uniform(-1.0, 1.0),
                        throttle=0.5,
                        brake=0.0,
                        hand_brake=False,
                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.


                    """ Beginn added code """
                    control = measurements.player_measurements.autopilot_control
                    #steer = control.steer
                    #print(control)

                    point_cloud = point_cloud.transpose()
                    points = np.expand_dims(point_cloud, axis=0)
                    points = torch.from_numpy(points.astype(np.float32))
                    #print(points)
                    if cuda_available:
                      points = points.cuda()
                    classifier = classifier.eval()

                    if args.use_steering_indicator:
                      #print("Using PointNetReg2")
                      steer, _, _ = classifier((points, steering_indicator))
                    else:
                      #print("Using PointNetReg")
                      steer, _, _ = classifier(points)
                    steer = steer.item()
                    if args.use_steering_indicator:
                      print("Using steering indicator: {} / {}".format(steering_direction, steering_indicator))
                    print("Autopilot steer: ", control.steer)
                    print("Pointnet steer: ", steer)
                    print("Relative difference: ", steer / control.steer if control.steer != 0.0 else 'NaN')
                    print("Absolute difference: ", control.steer - steer)
                    print("FOV:", args.lidar_fov)

                    # Plot some visualizations to visdom
                    if args.visdom:
                      plt.plot_point_cloud(points, var_name='pointcloud')
                      plt.plot('steering', 'PointNet', 'Steering', plt_index, steer)
                      plt.plot('steering', 'Autopilot', 'Steering', plt_index, control.steer)
                      plt_index += 1

                    # Let PointNet take over steering control
                    # (set this to False to keep the autopilot's steering instead).
                    if True:
                      print("Who's in command: POINTNET")
                      control.steer = steer
                      if args.ignore_red_lights or args.use_steering_indicator:
                        control.throttle = 0.5
                        control.brake = 0.0
                        control.hand_brake = False

                    else:
                      print("Who's in command: AUTOPILOT")

                    print("_________")
                    #pdb.set_trace()
                    """ End added code """
                    client.send_control(control)
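The point-cloud normalization earlier in this example (subtract the centroid, then divide by the largest distance from the origin) is the usual PointNet-style scaling into the unit sphere. Pulled out as a standalone helper it might look like the sketch below; the function name is mine and only NumPy is assumed.

import numpy as np

def normalize_point_cloud(points):
    """Center an (N, 3) point cloud at its centroid and scale it into the unit sphere."""
    points = points - points.mean(axis=0, keepdims=True)      # center
    furthest = np.max(np.sqrt(np.sum(points ** 2, axis=1)))   # radius of the cloud
    if furthest > 0:                                          # guard against a degenerate cloud
        points = points / furthest                            # scale
    return points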
Example #23
0
def run_carla_client(args):
    skip_frames = 100  # 10 seconds at 10 fps
    number_of_episodes = args.n_episode
    frames_per_episode = args.n_frame

    # n_weather = 14  # weather IDs start from 1
    # n_player_start_spots = 152  # Town01
    n_player_start_spots = 83  # Town02

    weathers = number_of_episodes * [2]  # CloudyNoon
    start_spots = list(range(n_player_start_spots))
    random.shuffle(start_spots)
    assert number_of_episodes < n_player_start_spots
    start_spots = start_spots[:number_of_episodes]

    # weathers = list(range(number_of_episodes))
    # # random.shuffle(weathers)
    # weathers = [w % 14 + 1 for w in weathers]
    # https://carla.readthedocs.io/en/latest/carla_settings/

    # number_of_episodes = n_weather*n_player_start_spots
    # frames_per_episode = args.n_frame

    # weathers, start_spots = np.meshgrid(list(range(1, n_weather+1)), list(range(n_player_start_spots)))

    # weathers = weathers.flatten()
    # start_spots = start_spots.flatten()

    if not os.path.isdir(args.out_dir):
        os.makedirs(args.out_dir)
    np.savetxt(join(args.out_dir, 'weathers.txt'), weathers, fmt='%d')
    np.savetxt(join(args.out_dir, 'start-spots.txt'), start_spots, fmt='%d')
    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=20,
                    NumberOfPedestrians=40,
                    WeatherId=weathers[episode],
                    # WeatherId=random.randrange(14) + 1,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('RGB')
                # Set image resolution in pixels.
                camera0.set(FOV=args.cam_fov)
                camera0.set_image_size(args.x_res, args.y_res)
                # Set its position relative to the car in meters.
                camera0.set_position(*args.cam_offset)
                camera0.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('Depth', PostProcessing='Depth')
                camera1.set(FOV=args.cam_fov)
                camera1.set_image_size(args.x_res, args.y_res)
                camera1.set_position(*args.cam_offset)
                camera1.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera1)

                camera2 = Camera('SegRaw',
                                 PostProcessing='SemanticSegmentation')
                camera2.set(FOV=args.cam_fov)
                camera2.set_image_size(args.x_res, args.y_res)
                camera2.set_position(*args.cam_offset)
                camera2.set_rotation(*args.cam_rotation)
                settings.add_sensor(camera2)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=10,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randrange(number_of_player_starts)

            player_start = start_spots[episode]

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode...')
            client.start_episode(player_start)

            frame = 0
            save_frame_idx = 0

            # extra_frames = 0
            # last_control = None
            # last_control_changed = 0
            # while frame < skip_frames + frames_per_episode + extra_frames:

            # Iterate every frame in the episode.
            for frame in range(skip_frames + frames_per_episode):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Print some of the measurements.
                print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk and frame >= skip_frames \
                    and (frame - skip_frames) % args.save_every_n_frames == 0:
                    # if last_control_changed < frame - args.save_every_n_frames:
                    #     extra_frames += args.save_every_n_frames
                    #     last_control_changed += args.save_every_n_frames
                    # else:
                    save_frame_idx += 1
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            episode + 1, name, save_frame_idx)
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:

                    client.send_control(steer=random.uniform(-1.0, 1.0),
                                        throttle=0.5,
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)

                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # if last_control:
                    #     for v1, v2 in zip(control.values(), last_control.values()):
                    #         if v1 != v2:
                    #             last_control_changed = frame
                    #             break

                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)
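The weathers.txt and start-spots.txt files written with np.savetxt above record, per episode, which weather ID and start spot were used. Reading them back to reproduce or inspect a recorded run is straightforward; a small sketch follows (the helper name is mine).

import numpy as np
from os.path import join

def load_episode_metadata(out_dir):
    """Read back the per-episode weather IDs and start-spot indices saved above."""
    weathers = np.loadtxt(join(out_dir, 'weathers.txt'), dtype=int)
    start_spots = np.loadtxt(join(out_dir, 'start-spots.txt'), dtype=int)
    return weathers, start_spots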
Example #24
0
def run_carla_client(args):

    # Here we will run 10 episodes with up to 10000 frames each.
    number_of_episodes = 10
    frames_per_episode = 10000

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.

    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for i_episode in range(0, number_of_episodes):
            # Start a new episode.

            if not args.autopilot:
                lane_detection = Popen(
                    ["/home/minghanz/lane-detection/newcheck/SFMotor/lane detection/lane-detection"],
                    cwd="/home/minghanz/lane-detection/newcheck/SFMotor/lane detection")

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                weather_ID = random.choice(range(0, 15))
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,  # 20
                    NumberOfPedestrians=0,  # 40
                    WeatherId=weather_ID,  # e.g. 2, or random.choice([1, 3, 7, 8, 14])
                    QualityLevel=args.quality_level)
                # MapName='Town01'
                settings.randomize_seeds()
                # Weather IDs: 1 sunny (with shadows), 2 bright cloudy, 3 after rain
                # (with shadows), 4 after rain (ambient light), 5 medium rain,
                # 6 heavy rain, 7 light rain, 8 dark, 9 cloudy,
                # 10 cloudy after rain, 11 sunny after rain, 12-13 rain.

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')  # , PostProcessing='None'
                # Set image resolution in pixels.
                camera0.set_image_size(800, 600)
                # Set its position relative to the car in meters.
                camera0.set_position(0.3, 0, 2.30)  # 0.3, 0, 1.3
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera2 = Camera('CameraDepth', PostProcessing='Depth')
                camera2.set_image_size(800, 600)
                camera2.set_position(0.30, 0, 2.30)
                settings.add_sensor(camera2)

                camera3 = Camera('CameraSemantics',
                                 PostProcessing='SemanticSegmentation')
                camera3.set_image_size(800, 600)
                camera3.set_position(0.30, 0, 2.30)
                settings.add_sensor(camera3)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(Channels=32,
                              Range=50,
                              PointsPerSecond=100000,
                              RotationFrequency=20,
                              UpperFovLimit=10,
                              LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))
            # player_start = 31

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('\rStarting new episode %d...weather: %d, player_start: %d' %
                  (i_episode, weather_ID, player_start))
            client.start_episode(player_start)
            px_l = -1
            py_l = -1
            ######### Parameters for one test
            lc_num = 0
            lc_hold = False
            lc_start_turn = False
            lc_num_2 = 100  # for counting failures in turning
            lc_num_3 = 0
            #############

            if args.writepose:
                pose_txt_obj = open('pose_episode_%d.txt' % (i_episode), 'w')

            # Iterate every frame in the episode.
            for frame in range(0, frames_per_episode):

                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                #ZYX LIDAR
                # if True: #not lc_hold:
                #     result = run_cpp(sensor_data['Lidar32'].point_cloud, True)
                #     #print('LP:',LP,'END')

                #     lidar_success = False
                #     if result:
                #         lidar_success = True
                #         pts, kb, hdx, tlx = result

                # print('pts:',pts)
                # print('kb:',kb)
                # print('hdx:',hdx)
                # print('tlx:',tlx)
                if args.autopilot:
                    if measurements.player_measurements.forward_speed > 1:
                        sys.stdout.write(
                            '\r------------------------%d Running' % (frame))
                    else:
                        sys.stdout.write(
                            '\r------------------------%d Stopped' % (frame))
                    sys.stdout.flush()
                else:
                    print('------------------------%d' % (frame))

                #ZYX LIDAR

                if not args.autopilot:
                    image = sensor_data['CameraRGB'].data
                    lane_coef = send_image(image)
                    print("lane_coef: ", lane_coef)

                # Print some of the measurements.
                #print_measurements(measurements)

                # Save the images to disk if requested.
                if args.save_images_to_disk and measurements.player_measurements.forward_speed > 1:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(
                            i_episode, weather_ID, name, frame)  # episode
                        measurement.save_to_disk(filename)

                # We can access the encoded data of a given image as numpy
                # array using its "data" property. For instance, to get the
                # depth value (normalized) at pixel X, Y
                #
                #     depth_array = sensor_data['CameraDepth'].data
                #     value_at_pixel = depth_array[Y, X]
                #

                if args.writepose:
                    player_measurements = measurements.player_measurements
                    cur_trans = player_measurements.transform.location
                    print('cur_trans: ', cur_trans)
                    cur_rot = player_measurements.transform.rotation
                    print('cur_rot: ', cur_rot)
                    pose_txt_obj.write('%f %f %f ' %
                                       (cur_trans.x, cur_trans.y, cur_trans.z))
                    pose_txt_obj.write(
                        '%f %f %f\n' %
                        (cur_rot.pitch, cur_rot.roll, cur_rot.yaw))

                # Now we have to send the instructions to control the vehicle.
                # If we are in synchronous mode the server will pause the
                # simulation until we send this control.

                if not args.autopilot:
                    player_measurements = measurements.player_measurements
                    pos_x = player_measurements.transform.location.x
                    pos_y = player_measurements.transform.location.y
                    speed = player_measurements.forward_speed * 3.6

                    #Traffic Light
                    # print('TrafficLight:-----------------')
                    # for agent in measurements.non_player_agents:
                    #     if agent.HasField('traffic_light'):
                    #         print(agent.id)
                    #         print(agent.traffic_light.transform)
                    #         print(agent.traffic_light.state)
                    #         print('-----------------')
                    #         break

                    #Traffic Light End

                    if px_l == -1:
                        px_l = pos_x
                    if py_l == -1:
                        py_l = pos_y
                    delta_x = pos_x - px_l
                    delta_y = pos_y - py_l
                    st = 0
                    #if pos_y < 11:
                    #   st = 0.3

                    # if lidar_success and hdx[0]<-2.2:
                    #     lc_num += 1
                    # else:
                    #     lc_num = 0

                    # if lc_hold:
                    #     st = 0.3

                    # if lc_num>2 and not lc_hold:
                    #     lc_hold = True
                    #     st = 0.2

                    # if  abs(lane_coef[2]) > 0.003 and frame > 100:
                    #     lc_num += 1
                    # else:
                    #     lc_num = 0

                    # if lc_num>5 and not lc_start_turn:
                    #     lc_start_turn = True

                    # if lc_start_turn and lane_coef[0] == 0:
                    #     lc_num_2 += 1

                    # if lc_hold:
                    #     st = 0.3

                    # if lc_num_2 > 5 and not lc_hold:
                    #     lc_hold = True
                    #     st = 0.25

                    # Simple turn state machine driven by the lane fit: count
                    # consecutive frames with a strongly curved lane polynomial.
                    if abs(lane_coef[2]) > 0.003 and frame > 300:
                        lc_num += 1
                    else:
                        lc_num = 0

                    # After enough curved frames, start the turn and arm a countdown.
                    if lc_num > 5 and not lc_start_turn:
                        lc_start_turn = True
                        lc_num_2 = 17

                    if lc_start_turn and lc_num_2 > 0:
                        lc_num_2 -= 1

                    # While the turn is held, apply a constant steering offset.
                    if lc_hold:
                        st = 0.3

                    # When the countdown expires, latch the turn and set the initial steer.
                    if lc_num_2 == 0 and not lc_hold:
                        lc_hold = True
                        st = 0.25

                    # print('lidar_success:', lidar_success )
                    print('lc_num:', lc_num)
                    print('lc_num_2:', lc_num_2)

                    # Steering from the lane fit: a1 and a2 combine the left/right
                    # lane coefficients; st is a proportional correction with
                    # lookahead l and gain k.
                    if lc_hold and lane_coef[0] != 0:
                        a1 = lane_coef[1] + lane_coef[4]
                        a2 = lane_coef[0] + lane_coef[3] - 0.2
                        l = 5
                        k = 0.08
                        st = k * (a1 * l + a2)
                        print('a1:', a1)
                        print('a2:', a2)

                    # Proportional speed control around a 28 km/h set point.
                    if speed > 28:
                        br = (speed - 28) * 0.1
                        thr = 0
                    else:
                        br = 0
                        thr = (28 - speed) * 0.05 + 0.6
                    # if pos_y > 150:
                    #     thr = 1.6
                    #     br = 0

                    if lc_hold:
                        lc_num_3 += 1
                    print('lc_num_3:', lc_num_3)

                    if lc_num_3 > 185:
                        thr = 0
                        br = 1
                        st = 0
                    # if pos_x > 5:
                    #     st = -delta_y*10 + (2-pos_y)*0.8
                    # if abs(st) < 0.001:
                    #     st = 0
                    # st = (2-pos_y)*0.01
                    print('Steering:', st)
                    client.send_control(
                        #steer=random.uniform(-1.0, 1.0),
                        steer=st,
                        throttle=thr,
                        brake=br,
                        hand_brake=False,
                        reverse=False)
                    px_l = pos_x
                    py_l = pos_y
                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)

            if args.writepose:
                pose_txt_obj.close()

            if not args.autopilot:
                lane_detection.kill()
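The throttle/brake logic in the last example is a plain proportional controller around a 28 km/h set point: above the target it brakes in proportion to the overshoot, below it it adds throttle in proportion to the shortfall on top of a 0.6 base. Extracted as a helper it might read as follows; the function name and default are mine, mirroring the constants used above.

def speed_control(speed_kmh, target_kmh=28.0):
    """Proportional throttle/brake around a target speed, as in the example above."""
    if speed_kmh > target_kmh:
        throttle = 0.0
        brake = (speed_kmh - target_kmh) * 0.1
    else:
        throttle = (target_kmh - speed_kmh) * 0.05 + 0.6
        brake = 0.0
    return throttle, brake

# Example: at 20 km/h this yields throttle = 1.0 and no brake; at 32 km/h it
# brakes with 0.4 and cuts the throttle.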