Code example #1
    def _on_render(self):
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        if self._mini_view_image1 is not None:
            array = image_converter.to_rgb_array(self._mini_view_image1)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (gap_x, mini_image_y))

        if self._mini_view_image2 is not None:
            array = image_converter.to_rgb_array(self._mini_view_image2)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            self._display.blit(surface,
                               (2 * gap_x + MINI_WINDOW_WIDTH, mini_image_y))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]

            new_window_width = \
                (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    agent_position = self._map.convert_to_pixel([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z
                    ])

                    w_pos = int(
                        agent_position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, [255, 0, 255, 255],
                                       (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
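
Note: pygame.surfarray.make_surface indexes arrays as (width, height, channels), while CARLA's image_converter.to_rgb_array returns (height, width, channels), hence the swapaxes(0, 1) before every blit in these examples. A minimal standalone sketch of just that conversion (the 800x600 shape is illustrative, not taken from any example above):

    import numpy as np
    import pygame

    rgb = np.zeros((600, 800, 3), dtype=np.uint8)  # (height, width, channels)
    surface = pygame.surfarray.make_surface(rgb.swapaxes(0, 1))
    assert surface.get_size() == (800, 600)  # pygame reports (width, height)
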
Code example #2
    def process_sensor_data(self, sensor_data):
        # Assumes all three cameras are attached this frame; .get() only
        # guards against a missing key, and the conversions below would
        # fail on a None image.
        _main_image = sensor_data.get('CameraRGB', None)
        _mini_view_image1 = sensor_data.get('CameraDepth', None)
        _mini_view_image2 = sensor_data.get('CameraSemSeg', None)

        t1 = image_converter.to_rgb_array(_main_image)
        # Collapse the depth visualization to one channel (kept as a
        # trailing axis of size 1) and the segmentation to a 2-D map.
        t2 = np.max(
            image_converter.depth_to_logarithmic_grayscale(_mini_view_image1),
            axis=2,
            keepdims=True)
        t3 = np.max(image_converter.to_rgb_array(_mini_view_image2), axis=2)
        return [t1, t2, t3]
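
Note: to_rgb_array in the CARLA 0.8.x client is a thin wrapper over the raw BGRA sensor buffer. A simplified sketch of what it computes (paraphrased, not the verbatim library source):

    import numpy as np

    def to_rgb_array_sketch(image):
        # Reinterpret the raw bytes as a (height, width, 4) BGRA array...
        array = np.frombuffer(image.raw_data, dtype=np.uint8)
        array = np.reshape(array, (image.height, image.width, 4))
        # ...then drop the alpha channel and reverse BGR to RGB.
        return array[:, :, :3][:, :, ::-1]
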
Code example #3
 def _prepare_video_images(self):
     speed = int(self._measurements.player_measurements.forward_speed * 3.6)  # m/s -> km/h
     speed = "{} km/h".format(speed)
     speed_limit = "{} km/h".format(self._current_speed_limit)
     traffic_light = self._current_traffic_light[0].name
     current_hlc = (
         self._current_hlc.name if self._drive_model_enabled else "Disabled"
     )
     info = [speed, speed_limit, traffic_light, current_hlc]
     self._video_info.append(info)
     self._video_images[0].append(ic.to_rgb_array(self._game_image))
     self._video_images[1].append(ic.to_rgb_array(self._game_image_3p))
Code example #4
    def _on_render(self):
        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        pygame.display.flip()
Code example #5
    def sensor_process(self, sensor_data):
        # The converters already return (height, width, 3) uint8-compatible
        # arrays, which is what cv2_to_imgmsg expects; swapaxes(0, 1)
        # followed by np.transpose(..., (1, 0, 2)) would cancel out, so the
        # converted arrays are used directly.
        self.image_rgb = image_converter.to_rgb_array(sensor_data['CameraRGB'])
        self.image_depth = np.array(
            image_converter.depth_to_logarithmic_grayscale(
                sensor_data['CameraDepth']),
            dtype=np.uint8)
        self.image_seg = np.array(
            image_converter.labels_to_cityscapes_palette(
                sensor_data['CameraSemSeg']),
            dtype=np.uint8)

        # One CvBridge instance is enough for all three messages.
        bridge = CvBridge()
        self.image_rgb_pub.publish(
            bridge.cv2_to_imgmsg(self.image_rgb, "rgb8"))
        self.image_depth_pub.publish(
            bridge.cv2_to_imgmsg(self.image_depth, "rgb8"))
        self.image_seg_pub.publish(
            bridge.cv2_to_imgmsg(self.image_seg, "rgb8"))
Code example #6
 def update_distances(self, msg):
     rgbd_frame = to_rgb_array(msg.data)
     normalized_distances = np.dot(
         rgbd_frame,
         [1.0, 256.0, 256.0 * 256.0]) / (256.0 * 256.0 * 256.0 - 1.0)
     # Compute distances in meters.
     distances = self._rgbd_max_range * normalized_distances
     self._distances.append((msg.timestamp, distances))
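
Note: the dot product above is CARLA's depth encoding: depth is packed into the R, G and B channels as a 24-bit fixed-point value. A self-contained sketch of the same decoding, where max_range stands in for self._rgbd_max_range and the default assumes the 1 km far plane of the 0.8.x depth camera:

    import numpy as np

    def decode_depth_meters(rgb_frame, max_range=1000.0):
        # normalized = (R + G*256 + B*256^2) / (256^3 - 1), in [0, 1]
        normalized = np.dot(rgb_frame[..., :3].astype(np.float64),
                            [1.0, 256.0, 256.0 * 256.0])
        normalized /= (256.0 ** 3 - 1.0)
        return max_range * normalized  # meters
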
Code example #7
    def _on_render(self):
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        if self._lidar_measurement is not None:
            lidar_data = np.array(self._lidar_measurement.data[:, :2])
            lidar_data *= 2.0
            lidar_data += 100.0
            lidar_data = np.fabs(lidar_data)
            lidar_data = lidar_data.astype(np.int32)
            lidar_data = np.reshape(lidar_data, (-1, 2))
            # Draw the lidar returns as a top-down image; make_surface
            # needs an integer array, so allocate the image as uint8.
            lidar_img_size = (200, 200, 3)
            lidar_img = np.zeros(lidar_img_size, dtype=np.uint8)
            lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
            surface = pygame.surfarray.make_surface(lidar_img)
            self._display.blit(surface, (10, 10))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]

            new_window_width = \
                (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    agent_position = self._map.convert_to_pixel([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z
                    ])

                    w_pos = int(
                        agent_position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, [255, 0, 255, 255],
                                       (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
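
Note: the lidar branch above rasterizes the metric x/y returns into a 200x200 top-down image at 2 px per meter, centered by the +100 px offset; np.fabs mirrors points that land on the negative side. A standalone sketch with dummy returns (np.clip is used here instead of the mirroring, which is a deliberate substitution to keep indices in bounds):

    import numpy as np

    points_xy = np.random.uniform(-50.0, 50.0, size=(1000, 2))  # dummy returns
    pixels = np.clip(points_xy * 2.0 + 100.0, 0, 199).astype(np.int32)
    lidar_img = np.zeros((200, 200, 3), dtype=np.uint8)
    lidar_img[tuple(pixels.T)] = (255, 255, 255)  # one white pixel per return
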
Code example #8
    def _on_render(self):
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x
        global COUNT
        global frame
        # COUNT += 1
        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))
            if COUNT % frame == 0:
                pygame.image.save(surface, "E:/RGB/rgb{}.png".format(COUNT))
                # print("RGB saved")

        if self._mini_view_image2 is not None:
            array = image_converter.labels_to_cityscapes_palette(
                self._mini_view_image2)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            if COUNT % frame == 0:
                pygame.image.save(
                    surface,
                    "E:/Semanticdata/Semantic{}.png".format(COUNT))
                # print("Semantic saved")
            self._display.blit(
                surface, (2 * gap_x + MINI_WINDOW_WIDTH, mini_image_y))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]

            new_window_width = \
                (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    agent_position = self._map.convert_to_pixel([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z])

                    w_pos = int(
                        agent_position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, [255, 0, 255, 255], (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
Code example #9
    def _render_pygame(self):
        if self._game_image is not None:
            array = ic.to_rgb_array(self._game_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            if self._game_state == GameState.WRITING:
                self._render_progressbar(
                    surface, 300, 40, self._disk_writer_thread.progress
                )

            self._pygame_display.blit(surface, (0, 0))

        self._render_HUD()

        pygame.display.flip()
Code example #10
    def _save_training_files(self, datapoints, point_cloud):
        logging.info(
            "Attempting to save at timer step {}, frame no: {}".format(
                self._timer.step, self.captured_frame_no))
        groundplane_fname = GROUNDPLANE_PATH.format(self.captured_frame_no)
        lidar_fname = LIDAR_PATH.format(self.captured_frame_no)
        kitti_fname = LABEL_PATH.format(self.captured_frame_no)
        img_fname = IMAGE_PATH.format(self.captured_frame_no)
        calib_filename = CALIBRATION_PATH.format(self.captured_frame_no)

        save_groundplanes(groundplane_fname,
                          self._measurements.player_measurements,
                          LIDAR_HEIGHT_POS)
        save_ref_files(OUTPUT_FOLDER, self.captured_frame_no)
        save_image_data(img_fname,
                        image_converter.to_rgb_array(self._main_image))
        save_kitti_data(kitti_fname, datapoints)
        save_lidar_data(lidar_fname, point_cloud, LIDAR_HEIGHT_POS,
                        LIDAR_DATA_FORMAT)
        save_calibration_matrices(calib_filename, self._intrinsic,
                                  self._extrinsic)
Code example #11
File: recording.py  Project: xuhuazhe/CIL_modular
 def yang_annotate_images(self, sensor_data, directions):
     if self._save_images:
         out = {}
         for name, image in sensor_data.items():
             if isinstance(image, ImageSensorData):
                 numpy_image = image_converter.to_rgb_array(image)
                 txtdt = {2.0: "follow",
                          3.0: "left",
                          4.0: "right",
                          5.0: "straight",
                          0.0: "goal"}
                 viz = self.write_text_on_image(numpy_image, txtdt[directions], 10)
                  # Convert the PIL image back into an ImageSensorData.
                 viz = viz.convert("RGBA")
                 viz = ImageSensorData(image.frame_number,
                                       image.width,
                                       image.height,
                                       image.type,
                                       image.fov,
                                       viz.tobytes())
                 out[name] = viz
             else:
                 out[name] = image
         return out
Code example #12
File: SDC_test.py  Project: Rwik2000/Autonomous-Car
def run_carla_client(args):

    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')
        while True:

            settings = CarlaSettings()
            settings.set(
                SynchronousMode=True,
                SendNonPlayerAgentsInfo=True,
                NumberOfVehicles=0,
                NumberOfPedestrians=0,
                WeatherId=3,  # other options: 1, 3, 7, 8, 14
                QualityLevel=args.quality_level)
            settings.randomize_seeds()

            # Now we want to add a couple of cameras to the player vehicle.
            # We will collect the images produced by these cameras every
            # frame.

            # The default camera captures RGB images of the scene.
            camera0 = sensor.Camera('CameraRGB', PostProcessing='SceneFinal')
            # Set image resolution in pixels.
            w = 800
            h = 600
            camera0.set_image_size(w, h)
            # Set its position relative to the car in meters.
            camera0.set_position(0.30, 0, 4)
            settings.add_sensor(camera0)

            # Let's add another camera producing ground-truth depth.
            depth_camera = sensor.Camera('CameraDepth', PostProcessing='Depth')
            depth_camera.set_image_size(800, 600)
            depth_camera.set_position(x=0.30, y=0, z=4)
            settings.add_sensor(depth_camera)

            if args.lidar:
                lidar = Lidar('Lidar32')
                lidar.set_position(0, 0, 2.50)
                lidar.set_rotation(0, 0, 0)
                lidar.set(Channels=32,
                          Range=50,
                          PointsPerSecond=100000,
                          RotationFrequency=10,
                          UpperFovLimit=10,
                          LowerFovLimit=-30)
                settings.add_sensor(lidar)

            scene = client.load_settings(settings)

            # Choose one player start at random.
            number_of_player_starts = len(scene.player_start_spots)
            player_start = random.randint(0, max(0,
                                                 number_of_player_starts - 1))

            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(player_start)
            action_model = roughPPO.actor_model((h, w, 3), (3, 1), (20, 2))
            critic_model = roughPPO.critic_model((h, w, 3), (3, 1), (20, 2),
                                                 (2, 1))
            actor_action = [0, 0]
            critic_score = 0
            ppo_check = 0
            # Iterate every frame in the episode.
            ppo_iter_count = 0
            while True:

                # Read the data produced by the server this frame.

                measurements, sensor_data = client.read_data()
                # print_measurements(measurements)
                main_image = sensor_data.get("CameraRGB", None)

                image_array = image_converter.to_rgb_array(main_image)

                point_cloud = image_converter.depth_to_local_point_cloud(
                    sensor_data['CameraDepth'], image_array, max_depth=1000)

                image_array = cv2.cvtColor(image_array, cv2.COLOR_RGB2BGR)
                cv2.imshow("FrontCam View", image_array)
                reverse = 0
                # Read shadow_check before entering the try block so the
                # except handler below can always reference it.
                check = SDC_data_store.shadow_check
                try:
                    steer_val = 0

                    bez_mask, coord_trajectory, trajectory, world_x_left, world_x_right, world_z_left, world_z_right = floodfill(
                        image_array, point_cloud)

                    track_point_yaw = trajectory[-5]
                    yaw_error = np.arctan(
                        (0 - track_point_yaw[0]) / (track_point_yaw[1]))
                    state_update_long(trajectory)
                    state_update_lat(world_x_left, world_x_right, yaw_error)

                    if ppo_check == 1:
                        reward_score = roughPPO.reward(actor_action,
                                                       world_x_left,
                                                       world_x_right)
                        if len(SDC_data_store.throttle_queue) < 50:
                            SDC_data_store.throttle_queue.append(
                                actor_action[0])
                            SDC_data_store.steer_queue.append(actor_action[1])
                            SDC_data_store.rewards_queue.append(reward_score)
                            SDC_data_store.critic_score.append(critic_score)
                        else:
                            # Queues are full: rotate out the oldest entries.
                            SDC_data_store.throttle_queue.popleft()
                            SDC_data_store.throttle_queue.append(
                                actor_action[0])
                            SDC_data_store.steer_queue.popleft()
                            SDC_data_store.steer_queue.append(actor_action[1])
                            SDC_data_store.rewards_queue.popleft()
                            SDC_data_store.rewards_queue.append(reward_score)
                            SDC_data_store.critic_score.popleft()
                            # Mirror the branch above: store the critic score,
                            # not the reward, in the critic queue.
                            SDC_data_store.critic_score.append(critic_score)

                        if (len(SDC_data_store.rewards_queue) > 1):
                            returns, advs = roughPPO.advantages(
                                list(SDC_data_store.critic_score),
                                list(SDC_data_store.rewards_queue))
                            loss = roughPPO.ppo_loss(
                                advs, SDC_data_store.rewards_queue,
                                SDC_data_store.critic_score)
                            action_model.compile(optimizer=Adam(lr=1e-3),
                                                 loss=loss)
                            critic_model.compile(optimizer=Adam(lr=1e-3),
                                                 loss='mse')
                            print("LOSS : " + str(loss))
                        ppo_check = 0
                    if ppo_iter_count % 20 == 0 and ppo_iter_count > 0 and ppo_check == 0:
                        ppo_check = 1
                        action_output, critic_output = ppo_input(
                            measurements, trajectory, image_array,
                            action_model, critic_model)
                        actor_action = action_output[0]
                        critic_score = critic_output[0][0]
                        throttle_a = actor_action[0]
                        steer_val = actor_action[1]

                    else:
                        throttle_a = PID_throttle(trajectory, reverse)
                        cv2.imshow("bez", bez_mask)
                        steer_val = PID_steer(trajectory, reverse,
                                              world_x_left, world_x_right)

                    check = SDC_data_store.shadow_check

                    client.send_control(steer=steer_val * check,
                                        throttle=throttle_a * check + 0.5 *
                                        (1 - check),
                                        brake=0.0,
                                        hand_brake=False,
                                        reverse=False)
                    SDC_data_store.turn_value = steer_val
                except Exception as e:
                    print(e)

                    client.send_control(steer=SDC_data_store.turn_value *
                                        check,
                                        throttle=1,
                                        brake=0.85,
                                        hand_brake=True,
                                        reverse=False)

                cv2.waitKey(1)

                if SDC_data_store.count % 10 == 0:
                    print("frame: ", SDC_data_store.count)

                # if SDC_data_store.count%10==0:
                #     SDC_data_store.sum_cte = 0
                SDC_data_store.count += 1
                print("*********\n")
                ppo_iter_count += 1
Code example #13
    def _on_render(self, meas):
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        if mixed_reality:
            if self._mini_view_image2 is not None:
                array1 = image_converter.labels_to_cityscapes_palette(
                    self._mini_view_image2)
                surface1 = pygame.surfarray.make_surface(array1.swapaxes(0, 1))
                surface1.set_alpha(128)
                self._display.blit(surface1, (0, 0))
        else:
            if self._mini_view_image1 is not None:
                array = image_converter.depth_to_logarithmic_grayscale(
                    self._mini_view_image1)
                surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
                self._display.blit(surface, (gap_x, mini_image_y))

            if self._mini_view_image2 is not None:
                array = image_converter.labels_to_cityscapes_palette(
                    self._mini_view_image2)
                surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
                self._display.blit(
                    surface, (2 * gap_x + MINI_WINDOW_WIDTH, mini_image_y))

        myfont = pygame.font.SysFont("sans", 25, bold=True)
        # forward_speed is reported in m/s; convert to km/h for display.
        label = myfont.render(
            "Current Speed: %.1f km/h" %
            (meas.player_measurements.forward_speed * 3.6), True,
            (255, 255, 0))
        self._display.blit(label, (WINDOW_WIDTH / 2, WINDOW_HEIGHT - 40))

        neighborAgents = [
            agent for agent in meas.non_player_agents
            if scipy.spatial.distance.cdist(
                [[meas.player_measurements.transform.location.x,
                  meas.player_measurements.transform.location.y]],
                [[agent.vehicle.transform.location.x,
                  agent.vehicle.transform.location.y]]) < 50e2
        ]

        myfont = pygame.font.SysFont("sans", 20, bold=True)

        for agent in neighborAgents:
            if agent.HasField('vehicle'):
                # print(agent.vehicle.transform.location)
                label = myfont.render(("Vehicle nearby"), 1, (255, 0, 255))
                self._display.blit(label, (WINDOW_WIDTH / 2, 40))

            if agent.HasField('pedestrian'):
                # print(agent.vehicle.transform.location)
                label = myfont.render(("Pedestrian nearby"), 1, (255, 0, 255))
                self._display.blit(label, (WINDOW_WIDTH / 2, 80))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]
            new_window_width = (float(WINDOW_HEIGHT) / float(
                self._map_shape[0])) * float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    print("found non-player agent")
                    agent_position = self._map.get_position_on_map([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z
                    ])
                    w_pos = int(
                        agent_position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))
                    pygame.draw.circle(surface, [255, 0, 255, 255],
                                       (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
Code example #14
    def _on_loop(self):
        self._timer.tick()

        measurements, sensor_data = self.client.read_data()

        self._main_image = sensor_data.get('CameraRGB_main', None)
        self._mini_view_image1 = sensor_data.get('CameraDepth', None)
        self._mini_view_image2 = sensor_data.get('CameraSemSeg', None)
        self._lidar_measurement = sensor_data.get('Lidar32', None)
        self._h5_image = sensor_data.get('CameraRGB_h5', None)

        timediff = self._timer.elapsed_seconds_since_lap()

        # Print measurements every second.
        if timediff > 1.0:
            if self._city_name is not None:
                # Function to get car position on map.
                map_position = self._map.convert_to_pixel([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z])
                # Function to get orientation of the road car is in.
                lane_orientation = self._map.get_lane_orientation([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z])

                self._print_player_measurements_map(
                    measurements.player_measurements,
                    map_position,
                    lane_orientation)
            else:
                self._print_player_measurements(measurements.player_measurements)

            # Plot position on the map as well.

            self._timer.lap()

        control = self._get_keyboard_control(pygame.key.get_pressed(), measurements, sensor_data)
        # Set the player position
        if self._city_name is not None:
            self._position = self._map.convert_to_pixel([
                measurements.player_measurements.transform.location.x,
                measurements.player_measurements.transform.location.y,
                measurements.player_measurements.transform.location.z])
            self._agent_positions = measurements.non_player_agents

        #print(control)
        if control is None:
            self._on_new_episode()
        elif self._enable_autopilot:
            self.client.send_control(measurements.player_measurements.autopilot_control)
        else:
            self.client.send_control(control)

        ### Saving on autopilot ###

        if (self._enable_autopilot) and (timediff > 0.03) and (self._h5_image is not None):

            steer = measurements.player_measurements.autopilot_control.steer
            throttle = measurements.player_measurements.autopilot_control.throttle
            brake = measurements.player_measurements.autopilot_control.brake
            hand_brake = measurements.player_measurements.autopilot_control.hand_brake
            reverse = measurements.player_measurements.autopilot_control.reverse

            s_image = image_converter.to_rgb_array(self._h5_image)
            s_measurements = [
                steer,
                throttle,
                brake,
                hand_brake,
                reverse,
                0.0,
                0.0,
                0.0,
                measurements.player_measurements.transform.location.x,
                measurements.player_measurements.transform.location.y,
                measurements.player_measurements.forward_speed,
                measurements.player_measurements.collision_other,
                measurements.player_measurements.collision_pedestrians,
                measurements.player_measurements.collision_vehicles,
                measurements.player_measurements.intersection_otherlane,
                measurements.player_measurements.intersection_offroad,
                measurements.player_measurements.acceleration.x,
                measurements.player_measurements.acceleration.y,
                measurements.player_measurements.acceleration.z,
                measurements.platform_timestamp,
                measurements.game_timestamp,
                measurements.player_measurements.transform.orientation.x,
                measurements.player_measurements.transform.orientation.y,
                measurements.player_measurements.transform.orientation.z,
                -1,  # slot 24: high-level command label, filled in below
                0.0,
                0.0,
                0.0,
            ]


            if brake == 1.0:
                self.brake_count += 1
            else:
                self.brake_count = 0
            # Need to handle intersection data separately
            # When stopping at stop light, labels can be random
            # When turning, driving labels need to be populated retrospectively
            # For lane following, just populate in large batch
            if self._at_intersection:
                if self._drivestate == 0:
                    self.saveh5s()
                    #print("Saved command {}".format(2))
                    self._drivestate = 1

                self._save_holder.append([copy.deepcopy(s_image), copy.deepcopy(s_measurements)])
                self._save_holder_stop.append([copy.deepcopy(s_image), copy.deepcopy(s_measurements)])

                if self.brake_count > 3:
                    #print("Stopped at light")
                    self._save_holder[-1][1][24] = -2
                    self._save_holder_stop[-1][1][24] = -2
                elif steer < -0.01:
                    #print("Turning left")
                    self._cmd = 3
                elif steer > 0.01:
                    #print("Turning right")
                    self._cmd = 4
                else:
                    #print("Going straight")
                    pass
            else:
                if self._drivestate == 1:
                    for i in range(len(self._save_holder)):
                        if self._save_holder[i][1][24] == -1:
                            self._save_holder[i][1][24] = self._cmd
                            self._save_holder_stop[i][1][24] = self._cmd
                        else:
                            self._save_holder[i][1][24] = self._cmd
                            self._save_holder_stop[i][1][24] = 6

                    self.saveh5s()
                    #print("Saved command {}".format(self._cmd))
                    self._cmd = 5
                    self._drivestate = 0

                self._save_holder.append([copy.deepcopy(s_image), copy.deepcopy(s_measurements)])
                self._save_holder_stop.append([copy.deepcopy(s_image), copy.deepcopy(s_measurements)])

                #print("Following lane")
                self._save_holder[-1][1][24] = 2
                if self.brake_count > 3:
                    self._save_holder_stop[-1][1][24] = 6
                else:
                    self._save_holder_stop[-1][1][24] = 2
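
Note: index 24 of s_measurements above holds the high-level driving command. Judging from this example together with the txtdt mapping in code example #11, the labels appear to be 2 = follow, 3 = left, 4 = right, 5 = straight, with 6 used in the stop variant; this is inferred from the code, not an official CARLA mapping. A hypothetical helper that makes the bookkeeping explicit:

    # Inferred command labels; hypothetical, not part of any CARLA API.
    CMD_INDEX = 24
    COMMANDS = {2: 'follow', 3: 'left', 4: 'right', 5: 'straight', 6: 'stop'}

    def label_records(records, command):
        # Each record is [image, measurements]; overwrite the command slot.
        for _, measurement_row in records:
            measurement_row[CMD_INDEX] = command
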
Code example #15
def run_carla_client(args):
    # Run episodes essentially forever; each iteration restarts an episode.
    number_of_episodes = 30000000
    # frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    global client

    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,
                    NumberOfPedestrians=0,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    WeatherId=1,
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(210, 160)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(210, 160)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start at random.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # player_start index. This function blocks until the server is ready
            # to start the episode.
            print('Starting new episode at %r...' % scene.map_name)

            client.start_episode(0)

            # Iterate every frame in the episode.

            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()
            # Print some of the measurements.

            player_measurements = measurements.player_measurements
            global other_lane
            global offroad
            other_lane = 100 * player_measurements.intersection_otherlane
            offroad = 100 * player_measurements.intersection_offroad
            global done
            done = False
            image_RGB = to_rgb_array(sensor_data['CameraRGB'])
            global image_RGB_real
            image_RGB_real = image_RGB.flatten()
            re_image(image_RGB_real)

            print_measurements(measurements)
            global reward
            reward = -other_lane - offroad + np.sqrt(
                np.square(player_measurements.transform.location.x - 271.0) +
                np.square(player_measurements.transform.location.y - 129.5))
            col = player_measurements.collision_other
            if offroad > 10 or other_lane > 10 or col > 0:
                print('Termination condition met (off-road, other lane, or collision).')
                done = True
Code example #16
def run_carla_client(host, port, far):
    # Here we will run 150 short episodes (one per start position) with
    # 30 frames each.
    number_of_frames = 30
    frame_step = 10  # Save one image every 10 frames

    image_size = [800, 600]
    camera_local_pos = [0.3, 0.0, 1.3]  # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 70
    autopilot = False
    control = VehicleControl()
    for start_i in range(150):
        output_folder = '/home2/mariap/Packages/CARLA_0.8.2/PythonClient/_out/pos_' + str(
            start_i)
        if not os.path.exists(output_folder):
            os.mkdir(output_folder)
            print("make " + str(output_folder))

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')
        for start_i in range(150):
            output_folder = '/home2/mariap/Packages/CARLA_0.8.2/PythonClient/_out/pos_' + str(
                start_i)
            print(output_folder)

            # Here we load the settings.
            settings = CarlaSettings()
            settings.set(SynchronousMode=True,
                         SendNonPlayerAgentsInfo=True,
                         NumberOfVehicles=100,
                         NumberOfPedestrians=500,
                         WeatherId=random.choice([1, 3, 7, 8, 14]))
            settings.randomize_seeds()

            camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
            camera1.set_image_size(*image_size)
            camera1.set_position(*camera_local_pos)
            camera1.set_rotation(*camera_local_rotation)
            settings.add_sensor(camera1)

            camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
            camera2.set_image_size(*image_size)
            camera2.set_position(*camera_local_pos)
            camera2.set_rotation(*camera_local_rotation)
            settings.add_sensor(camera2)

            camera3 = Camera('CameraSeg',
                             PostProcessing='SemanticSegmentation')
            camera3.set_image_size(*image_size)
            camera3.set_position(*camera_local_pos)
            camera3.set_rotation(*camera_local_rotation)
            settings.add_sensor(camera3)

            client.load_settings(settings)

            # Start at location index id '0'
            client.start_episode(start_i)

            cameras_dict = {}
            pedestrians_dict = {}
            cars_dict = {}
            # Compute the camera transform matrix
            camera_to_car_transform = camera2.get_unreal_transform()
            # (Intrinsic) 3x3 K matrix.
            K = np.identity(3)
            K[0, 2] = image_size[0] / 2.0
            K[1, 2] = image_size[1] / 2.0
            K[0, 0] = K[1, 1] = image_size[0] / (
                2.0 * np.tan(fov * np.pi / 360.0))
            with open(output_folder + '/camera_intrinsics.p', 'wb') as camfile:
                pickle.dump(K, camfile)

            # Iterate every frame in the episode except for the first one.
            for frame in range(1, number_of_frames):
                # Read the data produced by the server this frame.
                measurements, sensor_data = client.read_data()

                # Save one image every 'frame_step' frames
                if not frame % frame_step:
                    for name, measurement in sensor_data.items():
                        filename = '{:s}/{:0>6d}'.format(name, frame)
                        measurement.save_to_disk(
                            os.path.join(output_folder, filename))
                    # Start transformations time measurement.
                    timer = StopWatch()

                    # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                    image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                    image_seg = np.tile(
                        labels_to_array(sensor_data['CameraSeg']), (1, 1, 3))

                    # 2D to (camera-)local 3D.
                    # We use image_RGB to colorize each 3D point; this is
                    # optional. "max_depth" keeps only points near the camera;
                    # a value of 1.0 corresponds to the farthest points (sky).
                    point_cloud = depth_to_local_point_cloud(
                        sensor_data['CameraDepth'], image_RGB, max_depth=far)

                    point_cloud_seg = depth_to_local_point_cloud(
                        sensor_data['CameraDepth'], image_seg, max_depth=far)

                    # (Camera) local 3d to world 3d.
                    # Get the transform from the player protobuf transformation.
                    world_transform = Transform(
                        measurements.player_measurements.transform)

                    # Compute the final transformation matrix.
                    car_to_world_transform = world_transform * camera_to_car_transform

                    # Car to World transformation given the 3D points and the
                    # transformation matrix.
                    point_cloud.apply_transform(car_to_world_transform)
                    point_cloud_seg.apply_transform(car_to_world_transform)

                    Rt = car_to_world_transform.matrix
                    # Rt_inv is the world-to-camera matrix.
                    Rt_inv = car_to_world_transform.inverse().matrix
                    # R_inv = world_transform.inverse().matrix
                    cameras_dict[frame] = {}
                    cameras_dict[frame]['inverse_rotation'] = Rt_inv[:]
                    cameras_dict[frame]['rotation'] = Rt[:]
                    cameras_dict[frame]['translation'] = Rt_inv[0:3, 3]
                    cameras_dict[frame]['camera_to_car'] = \
                        camera_to_car_transform.matrix

                    # Get non-player info
                    vehicles = {}
                    pedestrians = {}
                    for agent in measurements.non_player_agents:
                        # check if the agent is a vehicle.
                        if agent.HasField('vehicle'):
                            pos = agent.vehicle.transform.location
                            pos_vector = np.array([[pos.x], [pos.y], [pos.z],
                                                   [1.0]])

                            transformed_3d_pos = np.dot(Rt_inv, pos_vector)
                            pos2d = np.dot(K, transformed_3d_pos[:3])

                            # Normalize the point
                            norm_pos2d = np.array([
                                pos2d[0] / pos2d[2], pos2d[1] / pos2d[2],
                                pos2d[2]
                            ])

                            # Now, pos2d contains the [x, y, d] values of the image in pixels (where d is the depth)
                            # You can use the depth to know the points that are in front of the camera (positive depth).

                            x_2d = image_size[0] - norm_pos2d[0]
                            y_2d = image_size[1] - norm_pos2d[1]
                            vehicles[agent.id] = {}
                            vehicles[agent.id]['transform'] = [
                                agent.vehicle.transform.location.x,
                                agent.vehicle.transform.location.y,
                                agent.vehicle.transform.location.z
                            ]
                            vehicles[agent.id]['bounding_box.transform'] = \
                                agent.vehicle.bounding_box.transform.location.z
                            vehicles[agent.id]['yaw'] = \
                                agent.vehicle.transform.rotation.yaw
                            vehicles[agent.id]['bounding_box'] = [
                                agent.vehicle.bounding_box.extent.x,
                                agent.vehicle.bounding_box.extent.y,
                                agent.vehicle.bounding_box.extent.z
                            ]
                            vehicle_transform = Transform(
                                agent.vehicle.bounding_box.transform)
                            pos = agent.vehicle.transform.location

                            bbox3d = agent.vehicle.bounding_box.extent

                            # Compute the 3D bounding box: the first four rows
                            # are the bottom face, the last four the top face.
                            z_off = agent.vehicle.bounding_box.transform.location.z
                            f = np.array([
                                [pos.x + bbox3d.x, pos.y - bbox3d.y,
                                 pos.z - bbox3d.z + z_off],
                                [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                 pos.z - bbox3d.z + z_off],
                                [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                 pos.z - bbox3d.z + z_off],
                                [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                 pos.z - bbox3d.z + z_off],
                                [pos.x + bbox3d.x, pos.y - bbox3d.y,
                                 pos.z + bbox3d.z + z_off],
                                [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                 pos.z + bbox3d.z + z_off],
                                [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                 pos.z + bbox3d.z + z_off],
                                [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                 pos.z + bbox3d.z + z_off],
                            ])

                            f_rotated = vehicle_transform.transform_points(f)
                            f_2D_rotated = []
                            vehicles[agent.id]['bounding_box_coord'] = f_rotated

                            for i in range(f.shape[0]):
                                point = np.array([[f_rotated[i, 0]],
                                                  [f_rotated[i, 1]],
                                                  [f_rotated[i, 2]], [1]])
                                # 3D position in camera space.
                                transformed_2d_pos = np.dot(Rt_inv, point)
                                # Conversion to camera frustum space.
                                pos2d = np.dot(K, transformed_2d_pos[:3])
                                norm_pos2d = np.array([
                                    pos2d[0] / pos2d[2],
                                    pos2d[1] / pos2d[2],
                                    pos2d[2]
                                ])
                                f_2D_rotated.append(
                                    np.array([
                                        image_size[0] - norm_pos2d[0],
                                        image_size[1] - norm_pos2d[1]
                                    ]))
                            vehicles[agent.id]['bounding_box_2D'] = \
                                f_2D_rotated

                        elif agent.HasField('pedestrian'):

                            pedestrians[agent.id] = {}
                            pedestrians[agent.id]['transform'] = [
                                agent.pedestrian.transform.location.x,
                                agent.pedestrian.transform.location.y,
                                agent.pedestrian.transform.location.z
                            ]
                            pedestrians[agent.id]['yaw'] = \
                                agent.pedestrian.transform.rotation.yaw
                            pedestrians[agent.id]['bounding_box'] = [
                                agent.pedestrian.bounding_box.extent.x,
                                agent.pedestrian.bounding_box.extent.y,
                                agent.pedestrian.bounding_box.extent.z
                            ]
                            vehicle_transform = Transform(
                                agent.pedestrian.bounding_box.transform)
                            pos = agent.pedestrian.transform.location

                            bbox3d = agent.pedestrian.bounding_box.extent

                            # Compute the 3D bounding box: the first four rows
                            # are the bottom face, the last four the top face.
                            f = np.array(
                                [[pos.x + bbox3d.x, pos.y - bbox3d.y, pos.z],
                                 [pos.x + bbox3d.x, pos.y + bbox3d.y, pos.z],
                                 [pos.x - bbox3d.x, pos.y + bbox3d.y, pos.z],
                                 [pos.x - bbox3d.x, pos.y - bbox3d.y, pos.z],
                                 [
                                     pos.x + bbox3d.x, pos.y - bbox3d.y,
                                     pos.z + bbox3d.z
                                 ],
                                 [
                                     pos.x + bbox3d.x, pos.y + bbox3d.y,
                                     pos.z + bbox3d.z
                                 ],
                                 [
                                     pos.x - bbox3d.x, pos.y + bbox3d.y,
                                     pos.z + bbox3d.z
                                 ],
                                 [
                                     pos.x - bbox3d.x, pos.y - bbox3d.y,
                                     pos.z + bbox3d.z
                                 ]])

                            f_rotated = vehicle_transform.transform_points(f)
                            pedestrians[agent.id]['bounding_box_coord'] = \
                                f_rotated
                            f_2D_rotated = []

                            for i in range(f.shape[0]):
                                point = np.array([[f_rotated[i, 0]],
                                                  [f_rotated[i, 1]],
                                                  [f_rotated[i, 2]], [1]])
                                # World to camera space (see the vehicle loop
                                # above).
                                transformed_2d_pos = np.dot(Rt_inv, point)
                                pos2d = np.dot(K, transformed_2d_pos[:3])
                                norm_pos2d = np.array([
                                    pos2d[0] / pos2d[2],
                                    pos2d[1] / pos2d[2],
                                    pos2d[2]
                                ])
                                f_2D_rotated.append([
                                    image_size[0] - norm_pos2d[0],
                                    image_size[1] - norm_pos2d[1]
                                ])
                            pedestrians[agent.id]['bounding_box_2D'] = \
                                f_2D_rotated

                    cars_dict[frame] = vehicles
                    pedestrians_dict[frame] = pedestrians

                    # End transformations time measurement.
                    timer.stop()

                    # Save PLY to disk
                    # This generates the PLY string with the 3D points and the RGB colors
                    # for each row of the file.
                    point_cloud.save_to_disk(
                        os.path.join(output_folder,
                                     '{:0>5}.ply'.format(frame)))
                    point_cloud_seg.save_to_disk(
                        os.path.join(output_folder,
                                     '{:0>5}_seg.ply'.format(frame)))

                    print_message(timer.milliseconds(), len(point_cloud),
                                  frame)

                if autopilot:
                    client.send_control(
                        measurements.player_measurements.autopilot_control)
                else:
                    control.hand_brake = True
                    client.send_control(control)
            with open(output_folder + '/cameras.p', 'wb') as camerafile:
                pickle.dump(cameras_dict, camerafile)
                print(output_folder + '/cameras.p')
            with open(output_folder + '/people.p', 'wb') as peoplefile:
                pickle.dump(pedestrians_dict, peoplefile)
            with open(output_folder + '/cars.p', 'wb') as carfile:
                pickle.dump(cars_dict, carfile)
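
Note: the projection logic in this example condenses to a pinhole model. A sketch under the same assumptions (square pixels, principal point at the image center, fov in degrees, plus the axis flips these CARLA 0.8 examples apply):

    import numpy as np

    def make_intrinsics(width, height, fov_deg):
        k = np.identity(3)
        k[0, 2] = width / 2.0
        k[1, 2] = height / 2.0
        k[0, 0] = k[1, 1] = width / (2.0 * np.tan(np.radians(fov_deg) / 2.0))
        return k

    def project_to_pixel(world_point, world_to_camera, k, image_size):
        cam = np.dot(world_to_camera, np.append(world_point, 1.0))  # camera space
        uvw = np.dot(k, cam[:3])  # camera frustum space
        u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]
        # A positive third component means the point is in front of the camera.
        return image_size[0] - u, image_size[1] - v, uvw[2]
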
Code example #17
File: point_cloud_example.py  Project: cyy1991/carla
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3000 frames.
    number_of_frames = 3000
    frame_step = 100  # Save one image every 100 frames
    output_folder = '_out'
    image_size = [800, 600]
    camera_local_pos = [0.3, 0.0, 1.3] # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 70

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(
            SynchronousMode=True,
            SendNonPlayerAgentsInfo=False,
            NumberOfVehicles=20,
            NumberOfPedestrians=40,
            WeatherId=random.choice([1, 3, 7, 8, 14]))
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)

        client.load_settings(settings)

        # Start at location index id '0'
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:
                # Start transformation time measurement.
                timer = StopWatch()

                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                # 2D to (camera-)local 3D.
                # We use image_RGB to colorize each 3D point; this is optional.
                # "max_depth" keeps only the points near the camera, since a
                # normalized depth of 1.0 corresponds to the farthest points (sky).
                point_cloud = depth_to_local_point_cloud(
                    sensor_data['CameraDepth'],
                    image_RGB,
                    max_depth=far
                )

                # (Camera) local 3d to world 3d.
                # Get the transform from the player protobuf transformation.
                world_transform = Transform(
                    measurements.player_measurements.transform
                )

                # Compute the final transformation matrix.
                car_to_world_transform = world_transform * camera_to_car_transform

                # Transform the 3D points from car space to world space
                # using the transformation matrix.
                point_cloud.apply_transform(car_to_world_transform)

                # End transformation time measurement.
                timer.stop()

                # Save PLY to disk
                # This generates the PLY string with the 3D points and the RGB colors
                # for each row of the file.
                point_cloud.save_to_disk(os.path.join(
                    output_folder, '{:0>5}.ply'.format(frame))
                )

                print_message(timer.milliseconds(), len(point_cloud), frame)

            client.send_control(
                measurements.player_measurements.autopilot_control
            )
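
The back-projection inside depth_to_local_point_cloud follows the usual pinhole model: each pixel is mapped through the inverse intrinsic matrix and scaled by its metric depth. A rough standalone sketch, assuming a normalized depth map (1.0 = far plane at 1000 m) and the intrinsics CARLA derives from the image size and FOV; names and defaults here are illustrative:

    import numpy as np

    def depth_to_points(normalized_depth, width=800, height=600, fov=70.0,
                        max_depth=0.9, far=1000.0):
        # Pinhole focal length: f = w / (2 * tan(fov / 2)).
        f = width / (2.0 * np.tan(fov * np.pi / 360.0))
        k_inv = np.linalg.inv(np.array([[f, 0.0, width / 2.0],
                                        [0.0, f, height / 2.0],
                                        [0.0, 0.0, 1.0]]))
        u, v = np.meshgrid(np.arange(width), np.arange(height))
        pixels = np.stack([u.ravel(), v.ravel(), np.ones(width * height)])
        depth = normalized_depth.ravel()
        keep = depth < max_depth  # drop points at the far plane (mostly sky)
        # Scale each back-projected ray by its depth in meters.
        return (k_inv @ pixels[:, keep]) * (depth[keep] * far)
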
Code example #18
0
def exec_waypoint_nav_demo(args):
    """ Executes waypoint navigation demo.
    """

    with make_carla_client(args.host, args.port) as client:
        print('Carla client connected.')

        date = "2021_10_15"
        trial = 1
        mdl = 'model-' + date + '-' + str(trial)
        model = load_model(os.path.join("models", mdl))

        settings = make_carla_settings(args)

        # Now we load these settings into the server. The server replies
        # with a scene description containing the available start spots for
        # the player. Here we can provide a CarlaSettings object or a
        # CarlaSettings.ini file as string.
        scene = client.load_settings(settings)

        # Refer to the player start folder in the WorldOutliner to see the
        # player start information
        player_start = PLAYER_START_INDEX

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print('Starting new episode at %r...' % scene.map_name)
        client.start_episode(player_start)

        #############################################
        # Load Configurations
        #############################################

        # Load the configuration file (options.cfg) and parse the various
        # options. Here we have two main options,
        # live_plotting and live_plotting_period, which control whether
        # live plotting is enabled and how often the live plotter updates
        # during the simulation run.
        config = configparser.ConfigParser()
        config.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         'options.cfg'))
        demo_opt = config['Demo Parameters']

        # Get options
        enable_live_plot = demo_opt.get('live_plotting', 'true').capitalize()
        enable_live_plot = enable_live_plot == 'True'
        live_plot_period = float(demo_opt.get('live_plotting_period', 0))

        # Set options
        live_plot_timer = Timer(live_plot_period)

        #############################################
        # Load Waypoints
        #############################################
        # Opens the waypoint file and stores it to "waypoints"
        waypoints_file = WAYPOINTS_FILENAME
        waypoints_np = None
        with open(waypoints_file) as waypoints_file_handle:
            waypoints = list(
                csv.reader(waypoints_file_handle,
                           delimiter=',',
                           quoting=csv.QUOTE_NONNUMERIC))
            waypoints_np = np.array(waypoints)

        # Because the waypoints are discrete and our controller performs better
        # with a continuous path, here we will send a subset of the waypoints
        # within some lookahead distance from the closest point to the vehicle.
        # Interpolating between each waypoint provides a finer-resolution path
        # and makes it more "continuous". A simple linear interpolation is
        # used as a preliminary method to address this issue, though it is
        # better addressed with more sophisticated interpolation methods
        # (spline interpolation, for example).
        # More appropriate interpolation methods are deliberately not used
        # here, in order to demonstrate the effects discrete paths can have
        # on the controller. These effects are much more obvious with linear
        # interpolation: part of the path is continuous, while the
        # discontinuities (which happen at the waypoints) show exactly what
        # sort of effect these points have on the controller.
        # Can you spot these during the simulation? If so, how can you further
        # reduce these effects?

        # Linear interpolation computations
        # Compute a list of distances between waypoints
        wp_distance = []  # distance array
        for i in range(1, waypoints_np.shape[0]):
            wp_distance.append(
                np.sqrt((waypoints_np[i, 0] - waypoints_np[i - 1, 0])**2 +
                        (waypoints_np[i, 1] - waypoints_np[i - 1, 1])**2))
        wp_distance.append(0)  # last distance is 0 because it is the distance
        # from the last waypoint to the last waypoint

        # Linearly interpolate between waypoints and store in a list
        wp_interp = []  # interpolated values
        # (rows = waypoints, columns = [x, y, v])
        wp_interp_hash = []  # hash table which indexes waypoints_np
        # to the index of the waypoint in wp_interp
        interp_counter = 0  # counter for current interpolated point index
        for i in range(waypoints_np.shape[0] - 1):
            # Add original waypoint to interpolated waypoints list (and append
            # it to the hash table)
            wp_interp.append(list(waypoints_np[i]))
            wp_interp_hash.append(interp_counter)
            interp_counter += 1

            # Interpolate to the next waypoint. First compute the number of
            # points to interpolate based on the desired resolution and
            # incrementally add interpolated points until the next waypoint
            # is about to be reached.
            num_pts_to_interp = int(np.floor(wp_distance[i] /\
                                         float(INTERP_DISTANCE_RES)) - 1)
            wp_vector = waypoints_np[i + 1] - waypoints_np[i]
            wp_uvector = wp_vector / np.linalg.norm(wp_vector)
            for j in range(num_pts_to_interp):
                next_wp_vector = INTERP_DISTANCE_RES * float(j +
                                                             1) * wp_uvector
                wp_interp.append(list(waypoints_np[i] + next_wp_vector))
                interp_counter += 1
        # add last waypoint at the end
        wp_interp.append(list(waypoints_np[-1]))
        wp_interp_hash.append(interp_counter)
        interp_counter += 1

        #############################################
        # Controller 2D Class Declaration
        #############################################
        # This is where we take the controller2d.py class
        # and apply it to the simulator
        controller = controller2d.Controller2D(waypoints)

        #############################################
        # Determine simulation average timestep (and total frames)
        #############################################
        # Ensure at least one frame is used to compute average timestep
        num_iterations = ITER_FOR_SIM_TIMESTEP
        if (ITER_FOR_SIM_TIMESTEP < 1):
            num_iterations = 1

        # Gather current data from the CARLA server. This is used to get the
        # simulator starting game time. Note that we also need to
        # send a command back to the CARLA server because synchronous mode
        # is enabled.
        measurement_data, sensor_data = client.read_data()
        sim_start_stamp = measurement_data.game_timestamp / 1000.0
        # Send a control command to proceed to next iteration.
        # This mainly applies for simulations that are in synchronous mode.
        send_control_command(client, throttle=0.0, steer=0, brake=1.0)
        # Computes the average timestep based on several initial iterations
        sim_duration = 0
        for i in range(num_iterations):
            # Gather current data
            measurement_data, sensor_data = client.read_data()
            # Send a control command to proceed to next iteration
            send_control_command(client, throttle=0.0, steer=0, brake=1.0)
            # Last stamp
            if i == num_iterations - 1:
                sim_duration = measurement_data.game_timestamp / 1000.0 -\
                               sim_start_stamp

        # Outputs average simulation timestep and computes how many frames
        # will elapse before the simulation should end based on various
        # parameters that we set in the beginning.
        SIMULATION_TIME_STEP = sim_duration / float(num_iterations)
        print("SERVER SIMULATION STEP APPROXIMATION: " + \
              str(SIMULATION_TIME_STEP))
        TOTAL_EPISODE_FRAMES = int((TOTAL_RUN_TIME + WAIT_TIME_BEFORE_START) /\
                               SIMULATION_TIME_STEP) + TOTAL_FRAME_BUFFER

        #############################################
        # Frame-by-Frame Iteration and Initialization
        #############################################
        # Store pose history starting from the start position
        measurement_data, sensor_data = client.read_data()
        start_x, start_y, start_yaw = get_current_pose(measurement_data)
        send_control_command(client, throttle=0.0, steer=0, brake=1.0)
        x_history = [start_x]
        y_history = [start_y]
        yaw_history = [start_yaw]
        time_history = [0]
        speed_history = [0]

        #############################################
        # Vehicle Trajectory Live Plotting Setup
        #############################################
        # Uses the live plotter to generate live feedback during the simulation.
        # The two feedback windows include the trajectory feedback and
        # the controller feedback (which includes the speed tracking).
        lp_traj = lv.LivePlotter(tk_title="Trajectory Trace")
        lp_1d = lv.LivePlotter(tk_title="Controls Feedback")

        ###
        # Add 2D position / trajectory plot
        ###
        trajectory_fig = lp_traj.plot_new_dynamic_2d_figure(
            title='Vehicle Trajectory',
            figsize=(FIGSIZE_X_INCHES, FIGSIZE_Y_INCHES),
            edgecolor="black",
            rect=[PLOT_LEFT, PLOT_BOT, PLOT_WIDTH, PLOT_HEIGHT])

        trajectory_fig.set_invert_x_axis()  # Because UE4 uses a left-handed
        # coordinate system, the X
        # axis in the graph is flipped
        trajectory_fig.set_axis_equal()  # X-Y spacing should be equal in size

        # Add waypoint markers
        trajectory_fig.add_graph("waypoints",
                                 window_size=waypoints_np.shape[0],
                                 x0=waypoints_np[:, 0],
                                 y0=waypoints_np[:, 1],
                                 linestyle="-",
                                 marker="",
                                 color='g')
        # Add trajectory markers
        trajectory_fig.add_graph("trajectory",
                                 window_size=TOTAL_EPISODE_FRAMES,
                                 x0=[start_x] * TOTAL_EPISODE_FRAMES,
                                 y0=[start_y] * TOTAL_EPISODE_FRAMES,
                                 color=[1, 0.5, 0])
        # Add lookahead path
        trajectory_fig.add_graph("lookahead_path",
                                 window_size=INTERP_MAX_POINTS_PLOT,
                                 x0=[start_x] * INTERP_MAX_POINTS_PLOT,
                                 y0=[start_y] * INTERP_MAX_POINTS_PLOT,
                                 color=[0, 0.7, 0.7],
                                 linewidth=4)
        # Add starting position marker
        trajectory_fig.add_graph("start_pos",
                                 window_size=1,
                                 x0=[start_x],
                                 y0=[start_y],
                                 marker=11,
                                 color=[1, 0.5, 0],
                                 markertext="Start",
                                 marker_text_offset=1)
        # Add end position marker
        trajectory_fig.add_graph("end_pos",
                                 window_size=1,
                                 x0=[waypoints_np[-1, 0]],
                                 y0=[waypoints_np[-1, 1]],
                                 marker="D",
                                 color='r',
                                 markertext="End",
                                 marker_text_offset=1)
        # Add car marker
        trajectory_fig.add_graph("car",
                                 window_size=1,
                                 marker="s",
                                 color='b',
                                 markertext="Car",
                                 marker_text_offset=1)

        ###
        # Add 1D speed profile updater
        ###
        forward_speed_fig =\
                lp_1d.plot_new_dynamic_figure(title="Forward Speed (m/s)")
        forward_speed_fig.add_graph("forward_speed",
                                    label="forward_speed",
                                    window_size=TOTAL_EPISODE_FRAMES)
        forward_speed_fig.add_graph("reference_signal",
                                    label="reference_Signal",
                                    window_size=TOTAL_EPISODE_FRAMES)

        # Add throttle signals graph
        throttle_fig = lp_1d.plot_new_dynamic_figure(title="Throttle")
        throttle_fig.add_graph("throttle",
                               label="throttle",
                               window_size=TOTAL_EPISODE_FRAMES)
        # Add brake signals graph
        brake_fig = lp_1d.plot_new_dynamic_figure(title="Brake")
        brake_fig.add_graph("brake",
                            label="brake",
                            window_size=TOTAL_EPISODE_FRAMES)
        # Add steering signals graph
        steer_fig = lp_1d.plot_new_dynamic_figure(title="Steer")
        steer_fig.add_graph("steer",
                            label="steer",
                            window_size=TOTAL_EPISODE_FRAMES)

        # If live plotting is disabled, hide the plot windows
        if not enable_live_plot:
            lp_traj._root.withdraw()
            lp_1d._root.withdraw()

        # Iterate the frames until the end of the waypoints is reached or
        # TOTAL_EPISODE_FRAMES is reached. The controller simulation then
        # outputs the results to the controller output directory.
        reached_the_end = False
        skip_first_frame = True
        closest_index = 0  # Index of waypoint that is currently closest to
        # the car (assumed to be the first index)
        closest_distance = 0  # Closest distance of closest waypoint to car
        for frame in range(TOTAL_EPISODE_FRAMES):
            # Gather current data from the CARLA server
            measurement_data, sensor_data = client.read_data()

            # Update pose, timestamp
            current_x, current_y, current_yaw = \
                get_current_pose(measurement_data)
            current_speed = measurement_data.player_measurements.forward_speed
            current_timestamp = float(measurement_data.game_timestamp) / 1000.0

            # Wait for some initial time before starting the demo
            if current_timestamp <= WAIT_TIME_BEFORE_START:
                send_control_command(client, throttle=0.0, steer=0, brake=1.0)
                continue
            else:
                current_timestamp = current_timestamp - WAIT_TIME_BEFORE_START

            # Store history
            x_history.append(current_x)
            y_history.append(current_y)
            yaw_history.append(current_yaw)
            speed_history.append(current_speed)
            time_history.append(current_timestamp)

            # Save the images to disk if requested.
            if args.save_images_to_disk:
                for name, measurement in sensor_data.items():
                    filename = args.out_filename_format.format(name, frame)
                    measurement.save_to_disk(filename)

            ###
            # Controller update (this uses the controller2d.py implementation)
            ###

            # To reduce the amount of waypoints sent to the controller,
            # provide a subset of waypoints that are within some
            # lookahead distance from the closest point to the car. Provide
            # a set of waypoints behind the car as well.

            # Find closest waypoint index to car. First increment the index
            # from the previous index until the new distance calculations
            # are increasing. Apply the same rule decrementing the index.
            # The final index should be the closest point (it is assumed that
            # the car will always break out of instability points where there
            # are two indices with the same minimum distance, as in the
            # center of a circle)
            closest_distance = np.linalg.norm(
                np.array([
                    waypoints_np[closest_index, 0] - current_x,
                    waypoints_np[closest_index, 1] - current_y
                ]))
            new_distance = closest_distance
            new_index = closest_index
            while new_distance <= closest_distance:
                closest_distance = new_distance
                closest_index = new_index
                new_index += 1
                if new_index >= waypoints_np.shape[0]:  # End of path
                    break
                new_distance = np.linalg.norm(
                    np.array([
                        waypoints_np[new_index, 0] - current_x,
                        waypoints_np[new_index, 1] - current_y
                    ]))
            new_distance = closest_distance
            new_index = closest_index
            while new_distance <= closest_distance:
                closest_distance = new_distance
                closest_index = new_index
                new_index -= 1
                if new_index < 0:  # Beginning of path
                    break
                new_distance = np.linalg.norm(
                    np.array([
                        waypoints_np[new_index, 0] - current_x,
                        waypoints_np[new_index, 1] - current_y
                    ]))

            # Once the closest index is found, return the path that has 1
            # waypoint behind and X waypoints ahead, where X is the index
            # that has a lookahead distance specified by
            # INTERP_LOOKAHEAD_DISTANCE
            waypoint_subset_first_index = closest_index - 1
            if waypoint_subset_first_index < 0:
                waypoint_subset_first_index = 0

            waypoint_subset_last_index = closest_index
            total_distance_ahead = 0
            while total_distance_ahead < INTERP_LOOKAHEAD_DISTANCE:
                total_distance_ahead += wp_distance[waypoint_subset_last_index]
                waypoint_subset_last_index += 1
                if waypoint_subset_last_index >= waypoints_np.shape[0]:
                    waypoint_subset_last_index = waypoints_np.shape[0] - 1
                    break

            # Use the first and last waypoint subset indices into the hash
            # table to obtain the first and last indices of the interpolated
            # list. Send the interpolated waypoints to the controller
            # for the next controller update.
            new_waypoints = \
                    wp_interp[wp_interp_hash[waypoint_subset_first_index]:\
                              wp_interp_hash[waypoint_subset_last_index] + 1]
            controller.update_waypoints(new_waypoints)

            # Update the other controller values and controls
            controller.update_values(current_x, current_y, current_yaw,
                                     current_speed, current_timestamp, frame)
            controller.update_controls()
            cmd_throttle, cmd_steer, cmd_brake = controller.get_commands()

            # Skip the first frame (so the controller has proper outputs)
            if skip_first_frame and frame == 0:
                pass
            else:
                # Update live plotter with new feedback
                trajectory_fig.roll("trajectory", current_x, current_y)
                trajectory_fig.roll("car", current_x, current_y)
                # When plotting the lookahead path, only plot up to
                # INTERP_MAX_POINTS_PLOT points. This is meant to
                # decrease the load of live plotting
                new_waypoints_np = np.array(new_waypoints)
                path_indices = np.floor(
                    np.linspace(0, new_waypoints_np.shape[0] - 1,
                                INTERP_MAX_POINTS_PLOT))
                trajectory_fig.update(
                    "lookahead_path",
                    new_waypoints_np[path_indices.astype(int), 0],
                    new_waypoints_np[path_indices.astype(int), 1],
                    new_colour=[0, 0.7, 0.7])
                forward_speed_fig.roll("forward_speed", current_timestamp,
                                       current_speed)
                forward_speed_fig.roll("reference_signal", current_timestamp,
                                       controller._desired_speed)

                throttle_fig.roll("throttle", current_timestamp, cmd_throttle)
                brake_fig.roll("brake", current_timestamp, cmd_brake)
                steer_fig.roll("steer", current_timestamp, cmd_steer)

                # Refresh the live plot based on the refresh rate
                # set by the options
                if enable_live_plot and \
                   live_plot_timer.has_exceeded_lap_period():
                    lp_traj.refresh()
                    lp_1d.refresh()
                    live_plot_timer.lap()

            # Predict the steering angle from the RGB camera image
            image = sensor_data.get('CameraRGB', None)
            image = image_converter.to_rgb_array(image)
            image = img_preprocess(image)
            image = np.array([image])  # add a batch dimension for the model
            steering_angle = float(model.predict(image))
            print(steering_angle)

            # Output controller command to CARLA server
            send_control_command(client,
                                 throttle=cmd_throttle,
                                 steer=steering_angle,
                                 brake=cmd_brake)

            # Check whether the end of the waypoints has been reached. If the
            # car is within DIST_THRESHOLD_TO_LAST_WAYPOINT of the last
            # waypoint, the simulation will end.
            dist_to_last_waypoint = np.linalg.norm(
                np.array([
                    waypoints[-1][0] - current_x, waypoints[-1][1] - current_y
                ]))
            if dist_to_last_waypoint < DIST_THRESHOLD_TO_LAST_WAYPOINT:
                reached_the_end = True
            if reached_the_end:
                break

        # End of demo - Stop vehicle and Store outputs to the controller output
        # directory.
        if reached_the_end:
            print("Reached the end of path. Writing to controller_output...")
        else:
            print("Exceeded assessment time. Writing to controller_output...")
        # Stop the car
        send_control_command(client, throttle=0.0, steer=0.0, brake=1.0)
        # Store the various outputs
        store_trajectory_plot(trajectory_fig.fig, 'trajectory.png')
        store_trajectory_plot(forward_speed_fig.fig, 'forward_speed.png')
        store_trajectory_plot(throttle_fig.fig, 'throttle_output.png')
        store_trajectory_plot(brake_fig.fig, 'brake_output.png')
        store_trajectory_plot(steer_fig.fig, 'steer_output.png')
        write_trajectory_file(x_history, y_history, speed_history,
                              time_history)
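
The forward/backward local search for the closest waypoint (used inside the frame loop above) can be isolated into a small helper. A sketch of the same descent logic, assuming an (N, 2+) NumPy waypoint array; the function name is illustrative:

    import numpy as np

    def find_closest_index(waypoints, x, y, start_index=0):
        def dist(i):
            return np.hypot(waypoints[i, 0] - x, waypoints[i, 1] - y)
        best = start_index
        # Walk forwards while the distance keeps decreasing ...
        while best + 1 < len(waypoints) and dist(best + 1) <= dist(best):
            best += 1
        # ... then backwards, in case the minimum lies behind the start.
        while best - 1 >= 0 and dist(best - 1) <= dist(best):
            best -= 1
        return best
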
Code example #19
0
    def _on_render(self, args):
        global detected_speed
        global flg_warning
        global current_speed
        global frame

        self._timer.tick()
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            if args.app:
                new = False
                detected_speed_loop = ''

                if frame % 5 == 0:
                    a = Image.fromarray(array, 'RGB')
                    a.save('Y:/Temp/carla_scene.jpg')
                    detected_speed_loop, new = self._traffic_sign_recogniser(new, detected_speed_loop)
   
                if new:
                    detected_speed = detected_speed_loop
                
                if flg_warning == -1:
                    r = 255
                    g = 0
                    b = 0
                elif flg_warning == 1:
                    r = 0
                    g = 255
                    b = 0
                else:
                    r = 0
                    g = 0
                    b = 255
                
                basicfont = pygame.font.SysFont(None, 80)
                text_detected = basicfont.render(detected_speed, True, (0, 0, 255))
                textrec = text_detected.get_rect()
                textrec.midtop = surface.get_rect().midtop
                surface.blit(text_detected, textrec)

                text_current = basicfont.render(current_speed + 'km/h', True, (r, g, b))
                textrec = text_current.get_rect()
                textrec.bottomright = surface.get_rect().bottomright
                surface.blit(text_current, textrec)

                if args.app == 'Warning':
                    if flg_warning == -1:
                        basicfont = pygame.font.SysFont(None, 60)
                        text_warning = basicfont.render('REDUCE YOUR SPEED', True, (r, g, b))
                        textrec = text_warning.get_rect()
                        textrec.bottomleft = surface.get_rect().bottomleft
                        surface.blit(text_warning, textrec)

                if args.app == 'Control':
                    if flg_warning == -1:
                        basicfont = pygame.font.SysFont(None, 60)
                        text_control = basicfont.render('SPEED REDUCED', True, (r, g, b))
                        textrec = text_control.get_rect()
                        textrec.bottomleft = surface.get_rect().bottomleft
                        surface.blit(text_control, textrec)

            self._display.blit(surface, (0, 0))
        pygame.display.flip()
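
The repeated font/render/blit pattern in this example could be factored into one helper. A sketch, assuming pygame is already initialised; the anchor names map onto pygame.Rect attributes such as 'midtop' and 'bottomright':

    import pygame

    def draw_text(surface, text, anchor='midtop', color=(0, 0, 255), size=80):
        font = pygame.font.SysFont(None, size)
        rendered = font.render(text, True, color)
        rect = rendered.get_rect()
        # Align the text rect to the matching anchor of the target surface.
        setattr(rect, anchor, getattr(surface.get_rect(), anchor))
        surface.blit(rendered, rect)
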
Code example #20
0
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3,000 frames.
    number_of_frames = 3000
    frame_step = 10  # Save one image every 10 frames
    output_folder = '_out'
    image_size = [1280, 720]
    camera_local_pos = [0.3, 0.0, 1.3]  # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 59

    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=False,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=7)
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)

        client.load_settings(settings)

        # Start at location index id '0'
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()
        print("camera_to_car_transform", camera_to_car_transform)

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:

                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                world_transform = Transform(
                    measurements.player_measurements.transform)

                # Compute the final transformation matrix.
                car_to_world_transform = world_transform * camera_to_car_transform
                print("{}.png".format(str(frame)))
                print([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z
                ])
                print("world_transform\n", world_transform)
                #print("car_to_world_transform\n",car_to_world_transform)
                print('\n')

                im_bgr = cv2.cvtColor(image_RGB, cv2.COLOR_RGB2BGR)
                cv2.imwrite("./{}/{}.png".format(output_folder, str(frame)),
                            im_bgr)

            client.send_control(
                measurements.player_measurements.autopilot_control)
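
The composed car_to_world_transform maps homogeneous camera-space points into world space. A short sketch of applying it to a single point, assuming the Transform class of the 0.8-era carla client, which exposes its 4x4 matrix via .matrix:

    import numpy as np

    camera_point = np.array([0.0, 0.0, 2.0, 1.0])  # 2 m in front of the lens
    world_point = np.dot(car_to_world_transform.matrix, camera_point)
    print(world_point[:3])
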
Code example #21
0
    def _on_render(self):
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)

            # Save the current frame into self._data, keyed by frame index,
            # steering angle, throttle and speed.
            if self._velocity > 5 and self._val3 == 1:  # change this condition as needed
                print("\nimage is saved\n")
                self._data['data{}_angle_{}_throttle_{}_speed_{}'.format(
                    self._i, format(self._val1, '.2f'),
                    format(self._val2, '.2f'), format(self._velocity,
                                                      '.2f'))] = array
            self._i += 1  # frame counter increases by one

            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        if self._mini_view_image1 is not None:
            array = image_converter.depth_to_logarithmic_grayscale(
                self._mini_view_image1)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (gap_x, mini_image_y))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]

            new_window_width = \
                (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    agent_position = self._map.convert_to_pixel([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z
                    ])

                    w_pos = int(
                        agent_position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, [255, 0, 255, 255],
                                       (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
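
The frames accumulated in self._data can be flushed to disk in one shot once the session ends. A minimal sketch using NumPy's compressed archive format; the filename and the data variable are illustrative:

    import numpy as np

    # data is the {label: image_array} dict built up above
    np.savez_compressed('training_data.npz', **data)

    # ... and read back later:
    archive = np.load('training_data.npz')
    first_key = archive.files[0]
    frame = archive[first_key]  # HxWx3 RGB array
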
Code example #22
0
    def _on_render(self):
        datapoints = []

        if self._main_image is not None and self._depth_image is not None:
            # Convert main image
            image = image_converter.to_rgb_array(self._main_image)

            # Retrieve and draw datapoints
            image, datapoints = self._generate_datapoints(image)

            # Draw lidar
            # Camera coordinate system is left, up, forwards
            if VISUALIZE_LIDAR:
                # Calculation to shift bboxes relative to pitch and roll of player
                rotation = self._measurements.player_measurements.transform.rotation
                pitch, roll, yaw = rotation.pitch, rotation.roll, rotation.yaw
                # Since measurements are in degrees, convert to radians

                pitch = degrees_to_radians(pitch)
                roll = degrees_to_radians(roll)
                yaw = degrees_to_radians(yaw)
                print('pitch: ', pitch)
                print('roll: ', roll)
                print('yaw: ', yaw)

                # Rotation matrix for pitch
                rotP = np.array([[cos(pitch), 0, sin(pitch)], [0, 1, 0],
                                 [-sin(pitch), 0, cos(pitch)]])
                # Rotation matrix for roll
                rotR = np.array([[1, 0, 0], [0, cos(roll), -sin(roll)],
                                 [0, sin(roll), cos(roll)]])

                # combined rotation matrix, must be in order roll, pitch, yaw
                rotRP = np.matmul(rotR, rotP)
                # Take the points from the point cloud and transform to car space
                point_cloud = np.array(
                    self._lidar_to_car_transform.transform_points(
                        self._lidar_measurement.data))
                point_cloud[:, 2] -= LIDAR_HEIGHT_POS
                point_cloud = np.matmul(rotRP, point_cloud.T).T
                # Transform to camera space by the inverse of camera_to_car transform
                point_cloud_cam = self._camera_to_car_transform.inverse(
                ).transform_points(point_cloud)
                point_cloud_cam[:, 1] += LIDAR_HEIGHT_POS
                image = lidar_utils.project_point_cloud(
                    image, point_cloud_cam, self._intrinsic, 1)

            # Display image
            surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))
            pygame.display.flip()

            if (self._timer.step + 1) % STEPS_BETWEEN_RECORDINGS == 0:
                if datapoints:
                    # Avoid doing this twice or unnecessarily often
                    if not VISUALIZE_LIDAR:
                        # Calculation to shift bboxes relative to pitch and roll of player
                        rotation = self._measurements.player_measurements.transform.rotation
                        pitch, roll, yaw = rotation.pitch, rotation.roll, rotation.yaw
                        # Since measurements are in degrees, convert to radians

                        pitch = degrees_to_radians(pitch)
                        roll = degrees_to_radians(roll)
                        yaw = degrees_to_radians(yaw)
                        print('pitch: ', pitch)
                        print('roll: ', roll)
                        print('yaw: ', yaw)

                        # Rotation matrix for pitch
                        rotP = np.array([[cos(pitch), 0,
                                          sin(pitch)], [0, 1, 0],
                                         [-sin(pitch), 0,
                                          cos(pitch)]])
                        # Rotation matrix for roll
                        rotR = np.array([[1, 0, 0], [0,
                                                     cos(roll), -sin(roll)],
                                         [0, sin(roll),
                                          cos(roll)]])

                        # combined rotation matrix, must be in order roll, pitch, yaw
                        rotRP = np.matmul(rotR, rotP)
                        # Take the points from the point cloud and transform to car space
                        point_cloud = np.array(
                            self._lidar_to_car_transform.transform_points(
                                self._lidar_measurement.data))
                        point_cloud[:, 2] -= LIDAR_HEIGHT_POS
                        point_cloud = np.matmul(rotRP, point_cloud.T).T
                    self._update_agent_location()
                    # Save screen, lidar and kitti training labels together with calibration and groundplane files
                    # self._save_training_files(datapoints, point_cloud)
                    img = get_image_data(
                        image_converter.to_rgb_array(self._main_image))
                    lid = get_lidar_data(point_cloud)
                    calib = get_calibration_matrices(self._intrinsic,
                                                     self._extrinsic)
                    mess = {
                        "idx": self.captured_frame_no,
                        "image_2": img,
                        "velodyne": lid,
                        "calib": calib
                    }
                    annos = self.evaluator.predict(mess)

                    # annos = self.evaluator.evaluate(self.captured_frame_no)
                    print(annos)
                    scores = annos["score"]
                    vertices = annos["vertices"]
                    bbox = annos["bbox"]

                    for i, score in enumerate(scores):
                        if score * 100 > 35:
                            # draw 3D
                            image = draw_projected_box3d(image,
                                                         vertices[i],
                                                         color=(0, 0, 255),
                                                         thickness=1)
                            image = cv2.putText(
                                image, str(round(score * 100, 2)),
                                (int(bbox[i][0]), int(bbox[i][1])),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 255),
                                1, cv2.LINE_AA)
                            # draw 2D
                            # bbox = annos["bbox"]
                            # x1, y1, x2, y2 = bbox[i]
                            # cv2.rectangle(image, pt1=(x1, y1), pt2=(x2, y2), color=(0, 0, 255), thickness=2)

                    # Display image
                    surface = pygame.surfarray.make_surface(
                        image.swapaxes(0, 1))
                    self._display.blit(surface, (0, 0))
                    pygame.display.flip()
                    self.captured_frame_no += 1
                    self._captured_frames_since_restart += 1
                    self._frames_since_last_capture = 0
                    time.sleep(0.5)
                else:
                    logging.debug("Could save datapoint".format(
                        DISTANCE_SINCE_LAST_RECORDING))
            else:
                self._frames_since_last_capture += 1
                logging.debug(
                    "Could not save training data - no visible agents of selected classes in scene"
                )
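
The pitch/roll de-rotation above is built twice with the same matrices. A sketch of the combined operation as one helper, applied to an (N, 3) point cloud; the function name is illustrative:

    import numpy as np
    from math import cos, sin

    def derotate(points, pitch, roll):
        # Rotation about the Y axis (pitch) ...
        rot_p = np.array([[cos(pitch), 0, sin(pitch)],
                          [0, 1, 0],
                          [-sin(pitch), 0, cos(pitch)]])
        # ... and about the X axis (roll), applied in the order roll, pitch.
        rot_r = np.array([[1, 0, 0],
                          [0, cos(roll), -sin(roll)],
                          [0, sin(roll), cos(roll)]])
        return (rot_r @ rot_p @ points.T).T
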
def exec_waypoint_nav_demo(args):
    """ Executes waypoint navigation demo.
    """

    with make_carla_client(args.host, args.port) as client:
        print('Carla client connected.')

        settings, camera2 = make_carla_settings(args)

        # Now we load these settings into the server. The server replies
        # with a scene description containing the available start spots for
        # the player. Here we can provide a CarlaSettings object or a
        # CarlaSettings.ini file as string.

        scene = client.load_settings(settings)

        # Refer to the player start folder in the WorldOutliner to see the
        # player start information
        player_start = PLAYER_START_INDEX

        # Notify the server that we want to start the episode at the
        # player_start index. This function blocks until the server is ready
        # to start the episode.
        print('Starting new episode at %r...' % scene.map_name)
        client.start_episode(player_start)

        #############################################
        # Load Configurations
        #############################################

        # Load the configuration file (options.cfg) and parse the various
        # options. Here we have two main options,
        # live_plotting and live_plotting_period, which control whether
        # live plotting is enabled and how often the live plotter updates
        # during the simulation run.
        config = configparser.ConfigParser()
        config.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         'options.cfg'))
        demo_opt = config['Demo Parameters']

        # Get options
        enable_live_plot = demo_opt.get('live_plotting', 'true').capitalize()
        enable_live_plot = enable_live_plot == 'True'
        live_plot_period = float(demo_opt.get('live_plotting_period', 0))

        # Set options
        live_plot_timer = Timer(live_plot_period)

        #############################################
        # Load Waypoints
        #############################################
        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()

        carla_utils_obj = segmentationobj.carla_utils(intrinsic)
        measurement_data, sensor_data = client.read_data()
        measurements = measurement_data
        image_RGB = to_rgb_array(sensor_data['CameraRGB'])
        image_depth = to_rgb_array(sensor_data['CameraDepth'])

        world_transform = Transform(measurements.player_measurements.transform)

        im_bgr = cv2.cvtColor(image_RGB, cv2.COLOR_RGB2BGR)

        pos_vector = ([
            measurements.player_measurements.transform.location.x,
            measurements.player_measurements.transform.location.y,
            measurements.player_measurements.transform.location.z
        ])

        # The client's Transform objects are easiest to read back through
        # their printed 4x4 matrix, so parse that string representation
        # into a NumPy array.
        def transform_to_matrix(transform):
            rows = str(transform)[1:-1].split('\n')
            matrix = []
            for row in rows:
                row = re.sub(r"\s+", ",", row[:-1].lstrip('[ ').strip())
                matrix.append([float(v) for v in row.split(',')])
            return np.array(matrix)

        newworld = transform_to_matrix(world_transform)
        newcam = transform_to_matrix(camera_to_car_transform)
        extrinsic = np.matmul(newworld, newcam)
        #print("dfjdfjkjfdkf",extrinsic)
        get_2d_point, pointsmid, res_img = carla_utils_obj.run_seg(
            im_bgr, extrinsic, pos_vector)
        #print(get_2d_point)
        #dis1=((get_2d_point[1]-pointsmid[0][1]),(get_2d_point[0]-pointsmid[0][0]))
        #dis2=((get_2d_point[1]-pointsmid[1][1]),(get_2d_point[0]-pointsmid[1][0]))
        #dis3=((get_2d_point[1]-pointsmid[2][1]),(get_2d_point[0]-pointsmid[2][0]))
        #dis4=((get_2d_point[1]-pointsmid[3][1]),(get_2d_point[0]-pointsmid[3][0]))
        #flagbox=darknet_proper_fps.performDetect(im_bgr,thresh,configPath, weightPath, metaPath, showImage, makeImageOnly, initOnly,pointsmid)
        # CARLA encodes metric depth across the three colour channels:
        # depth_m = (R + G*256 + B*256^2) / (256^3 - 1) * 1000 (far plane).
        # Decode the depth at a pixel, back-project it through the inverse
        # intrinsics and move it into world space with the extrinsics.
        def pixel_to_world(pixel):
            r, g, b = image_depth[int(pixel[0]), int(pixel[1])]
            depth = (float(r) + g * 256.0 + b * 256.0 * 256.0) \
                    / (256 ** 3 - 1) * 1000
            pos2d = np.array([(WINDOW_WIDTH - pixel[1]) * depth,
                              (WINDOW_HEIGHT - pixel[0]) * depth, depth])
            cam_point = np.dot(np.linalg.inv(intrinsic), pos2d)
            return np.matmul(extrinsic, np.append(cam_point, 1))

        sec1 = pixel_to_world(pointsmid[0])
        sec2 = pixel_to_world(pointsmid[1])
        sec3 = pixel_to_world(pointsmid[2])
        sec4 = pixel_to_world(pointsmid[3])

        sec = pixel_to_world(get_2d_point)
        print("present", pos_vector)
        print('goal-', sec)
        pos_vector[2] = 4.0
        ini = pos_vector
        goal = list(sec)
        goal[2] = 1.08
        aa = ini[0]
        dec = abs(ini[1] - goal[1]) / abs(aa - (goal[0]))
        f1 = open(WAYPOINTS_FILENAME, 'a+')
        for i in range(int(goal[0]), int(aa)):
            # Step one metre towards the goal, easing the height down over
            # the last ten steps.
            if abs(int(aa) - i) < 10:
                ini = [ini[0] - 1, ini[1] + dec, ini[2] - 0.03]
            else:
                ini = [ini[0] - 1, ini[1] + dec, ini[2]]
            f1.write(
                str(ini[0]) + ',' + str(ini[1]) + ',' + str(ini[2]) + '\n')
        f1.close()  # flush the appended waypoints before reading the file back

        waypoints_file = WAYPOINTS_FILENAME
        waypoints_np = None
        with open(waypoints_file) as waypoints_file_handle:
            waypoints = list(
                csv.reader(waypoints_file_handle,
                           delimiter=',',
                           quoting=csv.QUOTE_NONNUMERIC))
            waypoints_np = np.array(waypoints)
        #print("dfjdjfhjdhfjdfhjdfdjdfdufh",waypoints_np)
        print((waypoints_np))

        # Because the waypoints are discrete and our controller performs better
        # with a continuous path, here we will send a subset of the waypoints
        # within some lookahead distance from the closest point to the vehicle.
        # Interpolating between each waypoint provides a finer-resolution path
        # and makes it more "continuous". A simple linear interpolation is
        # used as a preliminary method to address this issue, though it is
        # better addressed with more sophisticated interpolation methods
        # (spline interpolation, for example).
        # More appropriate interpolation methods are deliberately not used
        # here, in order to demonstrate the effects discrete paths can have
        # on the controller. These effects are much more obvious with linear
        # interpolation: part of the path is continuous, while the
        # discontinuities (which happen at the waypoints) show exactly what
        # sort of effect these points have on the controller.
        # Can you spot these during the simulation? If so, how can you further
        # reduce these effects?

        # Linear interpolation computations
        # Compute a list of distances between waypoints
        wp_distance = []  # distance array
        for i in range(1, waypoints_np.shape[0]):
            wp_distance.append(
                np.sqrt((waypoints_np[i, 0] - waypoints_np[i - 1, 0])**2 +
                        (waypoints_np[i, 1] - waypoints_np[i - 1, 1])**2))
        wp_distance.append(0)  # last distance is 0 because it is the distance
        # from the last waypoint to the last waypoint

        # Linearly interpolate between waypoints and store in a list
        wp_interp = []  # interpolated values
        # (rows = waypoints, columns = [x, y, v])
        wp_interp_hash = []  # hash table which indexes waypoints_np
        # to the index of the waypoint in wp_interp
        interp_counter = 0  # counter for current interpolated point index
        for i in range(waypoints_np.shape[0] - 1):
            # Add original waypoint to interpolated waypoints list (and append
            # it to the hash table)
            wp_interp.append(list(waypoints_np[i]))
            wp_interp_hash.append(interp_counter)
            interp_counter += 1

            # Interpolate to the next waypoint. First compute the number of
            # points to interpolate based on the desired resolution and
            # incrementally add interpolated points until the next waypoint
            # is about to be reached.
            num_pts_to_interp = int(np.floor(wp_distance[i] /\
                                         float(INTERP_DISTANCE_RES)) - 1)
            wp_vector = waypoints_np[i + 1] - waypoints_np[i]
            wp_uvector = wp_vector / np.linalg.norm(wp_vector)
            for j in range(num_pts_to_interp):
                next_wp_vector = INTERP_DISTANCE_RES * float(j +
                                                             1) * wp_uvector
                wp_interp.append(list(waypoints_np[i] + next_wp_vector))
                interp_counter += 1
        # add last waypoint at the end
        wp_interp.append(list(waypoints_np[-1]))
        wp_interp_hash.append(interp_counter)
        interp_counter += 1

        #############################################
        # Controller 2D Class Declaration
        #############################################
        # This is where we take the controller2d.py class
        # and apply it to the simulator
        controller = controller2d.Controller2D(waypoints)

        #############################################
        # Determine simulation average timestep (and total frames)
        #############################################
        # Ensure at least one frame is used to compute average timestep
        num_iterations = ITER_FOR_SIM_TIMESTEP
        if (ITER_FOR_SIM_TIMESTEP < 1):
            num_iterations = 1

        # Gather current data from the CARLA server. This is used to get the
        # simulator starting game time. Note that we also need to
        # send a command back to the CARLA server because synchronous mode
        # is enabled.

        sim_start_stamp = measurement_data.game_timestamp / 1000.0
        # Send a control command to proceed to next iteration.
        #print("dddddddddddddddddddddddddddddddddddddddddddddd",sim_start_stamp)
        # This mainly applies for simulations that are in synchronous mode.
        send_control_command(client, throttle=0.0, steer=0, brake=1.0)
        # Computes the average timestep based on several initial iterations
        sim_duration = 0
        for i in range(num_iterations):
            # Gather current data
            measurement_data, sensor_data = client.read_data()
            # Send a control command to proceed to next iteration
            send_control_command(client, throttle=0.0, steer=0, brake=1.0)
            # Last stamp
            if i == num_iterations - 1:
                sim_duration = measurement_data.game_timestamp / 1000.0 -\
                               sim_start_stamp

        # Outputs average simulation timestep and computes how many frames
        # will elapse before the simulation should end based on various
        # parameters that we set in the beginning.
        SIMULATION_TIME_STEP = sim_duration / float(num_iterations)
        print("SERVER SIMULATION STEP APPROXIMATION: " + \
              str(SIMULATION_TIME_STEP))
        TOTAL_EPISODE_FRAMES = int((TOTAL_RUN_TIME + WAIT_TIME_BEFORE_START) /\
                               SIMULATION_TIME_STEP) + TOTAL_FRAME_BUFFER

        #############################################
        # Frame-by-Frame Iteration and Initialization
        #############################################
        # Store pose history starting from the start position
        measurement_data, sensor_data = client.read_data()
        start_x, start_y, start_yaw = get_current_pose(measurement_data)
        send_control_command(client, throttle=0.0, steer=0, brake=1.0)
        x_history = [start_x]
        y_history = [start_y]
        yaw_history = [start_yaw]
        time_history = [0]
        speed_history = [0]

        #############################################
        # Vehicle Trajectory Live Plotting Setup
        #############################################
        # Uses the live plotter to generate live feedback during the simulation.
        # The two feedback windows include the trajectory feedback and
        # the controller feedback (which includes the speed tracking).
        lp_traj = lv.LivePlotter(tk_title="Trajectory Trace")
        lp_1d = lv.LivePlotter(tk_title="Controls Feedback")

        ###
        # Add 2D position / trajectory plot
        ###
        trajectory_fig = lp_traj.plot_new_dynamic_2d_figure(
            title='Vehicle Trajectory',
            figsize=(FIGSIZE_X_INCHES, FIGSIZE_Y_INCHES),
            edgecolor="black",
            rect=[PLOT_LEFT, PLOT_BOT, PLOT_WIDTH, PLOT_HEIGHT])

        # UE4 uses a left-handed coordinate system, so the x-axis of the
        # plot is inverted to match the in-game view.
        trajectory_fig.set_invert_x_axis()
        trajectory_fig.set_axis_equal()  # X-Y spacing should be equal in size

        # Add waypoint markers
        trajectory_fig.add_graph("waypoints",
                                 window_size=waypoints_np.shape[0],
                                 x0=waypoints_np[:, 0],
                                 y0=waypoints_np[:, 1],
                                 linestyle="-",
                                 marker="",
                                 color='g')
        # Add trajectory markers
        trajectory_fig.add_graph("trajectory",
                                 window_size=TOTAL_EPISODE_FRAMES,
                                 x0=[start_x] * TOTAL_EPISODE_FRAMES,
                                 y0=[start_y] * TOTAL_EPISODE_FRAMES,
                                 color=[1, 0.5, 0])
        # Add lookahead path
        trajectory_fig.add_graph("lookahead_path",
                                 window_size=INTERP_MAX_POINTS_PLOT,
                                 x0=[start_x] * INTERP_MAX_POINTS_PLOT,
                                 y0=[start_y] * INTERP_MAX_POINTS_PLOT,
                                 color=[0, 0.7, 0.7],
                                 linewidth=4)
        # Add starting position marker
        trajectory_fig.add_graph("start_pos",
                                 window_size=1,
                                 x0=[start_x],
                                 y0=[start_y],
                                 marker=11,
                                 color=[1, 0.5, 0],
                                 markertext="Start",
                                 marker_text_offset=1)
        # Add end position marker
        trajectory_fig.add_graph("end_pos",
                                 window_size=1,
                                 x0=[waypoints_np[-1, 0]],
                                 y0=[waypoints_np[-1, 1]],
                                 marker="D",
                                 color='r',
                                 markertext="End",
                                 marker_text_offset=1)
        # Add car marker
        trajectory_fig.add_graph("car",
                                 window_size=1,
                                 marker="s",
                                 color='b',
                                 markertext="Car",
                                 marker_text_offset=1)

        ###
        # Add 1D speed profile updater
        ###
        forward_speed_fig =\
                lp_1d.plot_new_dynamic_figure(title="Forward Speed (m/s)")
        forward_speed_fig.add_graph("forward_speed",
                                    label="forward_speed",
                                    window_size=TOTAL_EPISODE_FRAMES)
        forward_speed_fig.add_graph("reference_signal",
                                    label="reference_Signal",
                                    window_size=TOTAL_EPISODE_FRAMES)

        # Add throttle signals graph
        throttle_fig = lp_1d.plot_new_dynamic_figure(title="Throttle")
        throttle_fig.add_graph("throttle",
                               label="throttle",
                               window_size=TOTAL_EPISODE_FRAMES)
        # Add brake signals graph
        brake_fig = lp_1d.plot_new_dynamic_figure(title="Brake")
        brake_fig.add_graph("brake",
                            label="brake",
                            window_size=TOTAL_EPISODE_FRAMES)
        # Add steering signals graph
        steer_fig = lp_1d.plot_new_dynamic_figure(title="Steer")
        steer_fig.add_graph("steer",
                            label="steer",
                            window_size=TOTAL_EPISODE_FRAMES)

        # If the live plotter is disabled, hide the windows
        if not enable_live_plot:
            lp_traj._root.withdraw()
            lp_1d._root.withdraw()

        # Iterate the frames until the end of the waypoints is reached or
        # the TOTAL_EPISODE_FRAMES is reached. The controller simulation then
        # outputs the results to the controller output directory.
        reached_the_end = False
        skip_first_frame = True
        closest_index = 0  # Index of the waypoint currently closest to the
                           # car (assumed to be the first index)
        closest_distance = 0  # Distance from the car to the closest waypoint
        counter = 0
        for frame in range(TOTAL_EPISODE_FRAMES):
            # Gather current data from the CARLA server
            measurement_data, sensor_data = client.read_data()
            #print("lllllllllllllllllllllllllll",len(waypoints_np),waypoints_np[-1])
            #update_pts=list(waypoints_np[-1])

            # Update pose, timestamp
            current_x, current_y, current_yaw = \
                get_current_pose(measurement_data)
            current_speed = measurement_data.player_measurements.forward_speed
            current_timestamp = float(measurement_data.game_timestamp) / 1000.0
            #print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",current_timestamp)
            # Wait for some initial time before starting the demo
            if current_timestamp <= WAIT_TIME_BEFORE_START:
                send_control_command(client, throttle=0.0, steer=0, brake=1.0)
                counter += 1
                continue
            else:
                current_timestamp = current_timestamp - WAIT_TIME_BEFORE_START

            # sec1..sec4 are world-space points computed elsewhere; they are
            # projected into the current camera image below.
            update_pts = [list(sec1), list(sec2), list(sec3), list(sec4)]
            pts_2d_ls = []
            for i in range(len(update_pts)):
                world_coord = np.asarray(update_pts[i]).reshape(4, -1)
                world_transform = Transform(
                    measurement_data.player_measurements.transform)

                # Recover the 4x4 world-transform matrix from its printed
                # representation.
                rows = str(world_transform)[1:-1].split('\n')
                matrix_rows = []
                for row in rows:
                    cleaned = re.sub(r"\s+", ",", row[:-1].lstrip('[ ').strip())
                    matrix_rows.append([float(v) for v in cleaned.split(',')])
                newworld = np.array(matrix_rows)

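                # Project world -> camera -> image: invert the extrinsic to go
                # from world to camera coordinates, then apply the intrinsic K
                # with a perspective divide. Points with non-positive depth
                # lie behind the camera and are skipped.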
                extrinsic = np.matmul(newworld, newcam)
                cam_coord = np.linalg.inv(extrinsic) @ world_coord
                img_coord = intrinsic @ cam_coord[:3]
                img_coord = np.array([
                    img_coord[0] / img_coord[2], img_coord[1] / img_coord[2],
                    img_coord[2]
                ])

                if img_coord[2] > 0:
                    x_2d = WINDOW_WIDTH - img_coord[0]
                    y_2d = WINDOW_HEIGHT - img_coord[1]
                    pts_2d_ls.append([x_2d, y_2d])

            image_RGB = to_rgb_array(sensor_data['CameraRGB'])
            im_bgr = cv2.cvtColor(image_RGB, cv2.COLOR_RGB2BGR)
            image_depth = to_rgb_array(sensor_data['CameraDepth'])
            counter += 1
            # counter is always positive here (it is also incremented during
            # the wait-period frames), so the branch below never triggers.
            if counter == 0:
                flagbox = False
            else:
                try:
                    pointsmid[0][1] = pts_2d_ls[0][0]
                    pointsmid[0][0] = pts_2d_ls[0][1]
                    pointsmid[1][1] = pts_2d_ls[1][0]
                    pointsmid[1][0] = pts_2d_ls[1][1]
                    pointsmid[2][1] = pts_2d_ls[2][0]
                    pointsmid[2][0] = pts_2d_ls[2][1]
                    pointsmid[3][1] = pts_2d_ls[3][0]
                    pointsmid[3][0] = pts_2d_ls[3][1]
                    disbox = False
                    flagbox = darknet_proper_fps.performDetect(
                        im_bgr, thresh, configPath, weightPath, metaPath,
                        showImage, makeImageOnly, initOnly, pointsmid, disbox)
                except Exception as e:
                    disbox = True
                    flagbox = darknet_proper_fps.performDetect(
                        im_bgr, thresh, configPath, weightPath, metaPath,
                        showImage, makeImageOnly, initOnly, pointsmid, disbox)
                    if flagbox is not False:
                        midofpts = [(pointsmid[1][1] + pointsmid[0][1]) / 2,
                                    (pointsmid[1][0] + pointsmid[0][0]) / 2]
                        depthflag = image_depth[int(flagbox[1]),
                                                int(flagbox[0])]
                        depthpts = image_depth[int(midofpts[1]),
                                               int(midofpts[0])]
                        cv2.imwrite('./seg_out/{}_zz.jpg'.format(frame),
                                    im_bgr)
                        # Decode CARLA's 24-bit depth encoding into meters:
                        # (R + G*256 + B*256^2) / (256^3 - 1) * 1000
                        scalenew = depthflag[0] + depthflag[1] * 256 + \
                            depthflag[2] * 256 * 256
                        depthflag = scalenew / ((256 * 256 * 256) - 1) * 1000
                        scalenew = depthpts[0] + depthpts[1] * 256 + \
                            depthpts[2] * 256 * 256
                        depthpts = scalenew / ((256 * 256 * 256) - 1) * 1000
                        diff = abs(depthflag - depthpts)
                        print("Depth difference between detection and "
                              "reference points:", diff)
                        if diff < 10:
                            flagbox = True
                        else:
                            flagbox = False
                    print(e)

            print("fffffffffffffffffff", flagbox)
            x_history.append(current_x)
            y_history.append(current_y)
            yaw_history.append(current_yaw)
            speed_history.append(current_speed)
            time_history.append(current_timestamp)
            if flagbox == False:

                ###
                # Controller update (this uses the controller2d.py implementation)
                ###

                # To reduce the amount of waypoints sent to the controller,
                # provide a subset of waypoints that are within some
                # lookahead distance from the closest point to the car. Provide
                # a set of waypoints behind the car as well.

                # Find closest waypoint index to car. First increment the index
                # from the previous index until the new distance calculations
                # are increasing. Apply the same rule decrementing the index.
                # The final index should be the closest point (it is assumed that
                # the car will always break out of instability points where there
                # are two indices with the same minimum distance, as in the
                # center of a circle)
                closest_distance = np.linalg.norm(
                    np.array([
                        waypoints_np[closest_index, 0] - current_x,
                        waypoints_np[closest_index, 1] - current_y
                    ]))
                new_distance = closest_distance
                new_index = closest_index
                while new_distance <= closest_distance:
                    closest_distance = new_distance
                    closest_index = new_index
                    new_index += 1
                    if new_index >= waypoints_np.shape[0]:  # End of path
                        break
                    new_distance = np.linalg.norm(
                        np.array([
                            waypoints_np[new_index, 0] - current_x,
                            waypoints_np[new_index, 1] - current_y
                        ]))
                new_distance = closest_distance
                new_index = closest_index
                while new_distance <= closest_distance:
                    closest_distance = new_distance
                    closest_index = new_index
                    new_index -= 1
                    if new_index < 0:  # Beginning of path
                        break
                    new_distance = np.linalg.norm(
                        np.array([
                            waypoints_np[new_index, 0] - current_x,
                            waypoints_np[new_index, 1] - current_y
                        ]))

                # Once the closest index is found, return the path that has
                # one waypoint behind and X waypoints ahead, where X is the
                # index that has a lookahead distance specified by
                # INTERP_LOOKAHEAD_DISTANCE.
                waypoint_subset_first_index = closest_index - 1
                if waypoint_subset_first_index < 0:
                    waypoint_subset_first_index = 0

                waypoint_subset_last_index = closest_index
                total_distance_ahead = 0
                while total_distance_ahead < INTERP_LOOKAHEAD_DISTANCE:
                    total_distance_ahead += wp_distance[
                        waypoint_subset_last_index]
                    waypoint_subset_last_index += 1
                    if waypoint_subset_last_index >= waypoints_np.shape[0]:
                        waypoint_subset_last_index = waypoints_np.shape[0] - 1
                        break

                # Use the first and last waypoint subset indices into the
                # hash table to obtain the first and last indices of the
                # interpolated list, then pass the interpolated waypoints
                # to the controller for the next update.
                new_waypoints = \
                    wp_interp[wp_interp_hash[waypoint_subset_first_index]:
                              wp_interp_hash[waypoint_subset_last_index] + 1]
                controller.update_waypoints(new_waypoints)

                # Update the other controller values and controls
                controller.update_values(current_x, current_y, current_yaw,
                                         current_speed, current_timestamp,
                                         frame)
                controller.update_controls()
                cmd_throttle, cmd_steer, cmd_brake = controller.get_commands()

                # Skip the first frame (so the controller has proper outputs)
                if skip_first_frame and frame == 0:
                    pass
                else:
                    # Update live plotter with new feedback
                    trajectory_fig.roll("trajectory", current_x, current_y)
                    trajectory_fig.roll("car", current_x, current_y)
                    # When plotting lookahead path, only plot a number of points
                    # (INTERP_MAX_POINTS_PLOT amount of points). This is meant
                    # to decrease load when live plotting
                    new_waypoints_np = np.array(new_waypoints)
                    path_indices = np.floor(
                        np.linspace(0, new_waypoints_np.shape[0] - 1,
                                    INTERP_MAX_POINTS_PLOT))
                    trajectory_fig.update(
                        "lookahead_path",
                        new_waypoints_np[path_indices.astype(int), 0],
                        new_waypoints_np[path_indices.astype(int), 1],
                        new_colour=[0, 0.7, 0.7])
                    forward_speed_fig.roll("forward_speed", current_timestamp,
                                           current_speed)
                    forward_speed_fig.roll("reference_signal",
                                           current_timestamp,
                                           controller._desired_speed)

                    throttle_fig.roll("throttle", current_timestamp,
                                      cmd_throttle)
                    brake_fig.roll("brake", current_timestamp, cmd_brake)
                    steer_fig.roll("steer", current_timestamp, cmd_steer)

                    # Refresh the live plot based on the refresh rate
                    # set by the options
                    if enable_live_plot and \
                            live_plot_timer.has_exceeded_lap_period():
                        lp_traj.refresh()
                        lp_1d.refresh()
                        live_plot_timer.lap()

                # Output controller command to CARLA server
                send_control_command(client,
                                     throttle=cmd_throttle,
                                     steer=cmd_steer,
                                     brake=cmd_brake)

                # Check whether the end of the waypoint list has been
                # reached. If the car is within
                # DIST_THRESHOLD_TO_LAST_WAYPOINT of the last waypoint,
                # the simulation ends.
                dist_to_last_waypoint = np.linalg.norm(
                    np.array([
                        waypoints[-1][0] - current_x,
                        waypoints[-1][1] - current_y
                    ]))
                if dist_to_last_waypoint < DIST_THRESHOLD_TO_LAST_WAYPOINT:
                    reached_the_end = True
                if reached_the_end:
                    break
            else:
                send_control_command(client,
                                     throttle=0.0,
                                     steer=0.0,
                                     brake=1.0)
                break

        # End of demo - stop the vehicle and store the outputs to the
        # controller output directory.
        if reached_the_end:
            print("Reached the end of path. Writing to controller_output...")
        else:
            print("Stopped before reaching the end of the path. "
                  "Writing to controller_output...")
        # Stop the car
        send_control_command(client, throttle=0.0, steer=0.0, brake=1.0)
        # Store the various outputs
        store_trajectory_plot(trajectory_fig.fig, 'trajectory.png')
        store_trajectory_plot(forward_speed_fig.fig, 'forward_speed.png')
        store_trajectory_plot(throttle_fig.fig, 'throttle_output.png')
        store_trajectory_plot(brake_fig.fig, 'brake_output.png')
        store_trajectory_plot(steer_fig.fig, 'steer_output.png')
        write_trajectory_file(x_history, y_history, speed_history,
                              time_history)
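
The same three-line depth decoding appears twice in this example (and once more in the next one); a small helper makes the encoding explicit. A minimal sketch, with a function name of our own choosing:

    def decode_carla_depth(pixel):
        """Decode a CARLA 0.8.x depth pixel (R, G, B) into meters.

        CARLA packs normalized depth into 24 bits as
        (R + G*256 + B*256**2) / (256**3 - 1); multiplying by the
        1000 m far plane gives metric depth.
        """
        r, g, b = (float(c) for c in pixel[:3])
        normalized = (r + g * 256 + b * 256 * 256) / (256 ** 3 - 1)
        return normalized * 1000.0
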
Code Example #24
0
def run_carla_client(host, port, far):
    # Here we will run a single episode with 3000 frames.
    number_of_frames = 3000
    frame_step = 10  # Save one image every 10 frames
    output_folder = '_out'
    image_size = [1280, 720]
    camera_local_pos = [0.7, 0.0, 1.3]  # [X, Y, Z]
    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
    fov = 59
    WINDOW_WIDTH = 1280
    WINDOW_HEIGHT = 720
    MINI_WINDOW_WIDTH = 320
    MINI_WINDOW_HEIGHT = 180

    WINDOW_WIDTH_HALF = WINDOW_WIDTH / 2
    WINDOW_HEIGHT_HALF = WINDOW_HEIGHT / 2

    # (Intrinsic) 3x3 K matrix: principal point at the image center,
    # focal length derived from the horizontal field of view.
    k = np.identity(3)
    k[0, 2] = WINDOW_WIDTH_HALF
    k[1, 2] = WINDOW_HEIGHT_HALF
    k[0, 0] = k[1, 1] = WINDOW_WIDTH / \
        (2.0 * math.tan(fov * math.pi / 360.0))
    intrinsic = k
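    # Pinhole relation: f = W / (2 * tan(FOV / 2)); with W = 1280 and
    # FOV = 59 degrees this gives f ~= 1131 px.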
    # Connect with the server
    with make_carla_client(host, port) as client:
        print('CarlaClient connected')

        # Here we load the settings.
        settings = CarlaSettings()
        settings.set(SynchronousMode=True,
                     SendNonPlayerAgentsInfo=False,
                     NumberOfVehicles=20,
                     NumberOfPedestrians=40,
                     WeatherId=1)
        settings.randomize_seeds()

        camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
        camera1.set_image_size(*image_size)
        camera1.set_position(*camera_local_pos)
        camera1.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera1)

        camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
        camera2.set_image_size(*image_size)
        camera2.set_position(*camera_local_pos)
        camera2.set_rotation(*camera_local_rotation)
        settings.add_sensor(camera2)
        client.load_settings(settings)
        # Start at player-start index 0.
        client.start_episode(0)

        # Compute the camera transform matrix
        camera_to_car_transform = camera2.get_unreal_transform()
        print("camera_to_car_transform", camera_to_car_transform)
        carla_utils_obj = segmentation.carla_utils(intrinsic)

        # Iterate every frame in the episode except for the first one.
        for frame in range(1, number_of_frames):
            # Read the data produced by the server this frame.
            measurements, sensor_data = client.read_data()

            # Save one image every 'frame_step' frames
            if not frame % frame_step:
                # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                image_depth = to_rgb_array(sensor_data['CameraDepth'])
                world_transform = Transform(
                    measurements.player_measurements.transform)

                im_bgr = cv2.cvtColor(image_RGB, cv2.COLOR_RGB2BGR)

                pos_vector = ([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z
                ])

                # Recover the 4x4 matrices of the world and camera-to-car
                # transforms from their printed representations.
                def parse_matrix(transform):
                    rows = str(transform)[1:-1].split('\n')
                    parsed = []
                    for row in rows:
                        cleaned = re.sub(r"\s+", ",",
                                         row[:-1].lstrip('[ ').strip())
                        parsed.append([float(v) for v in cleaned.split(',')])
                    return np.array(parsed)

                newworld = parse_matrix(world_transform)
                newcam = parse_matrix(camera_to_car_transform)
                extrinsic = np.matmul(newworld, newcam)
                get_2d_point = carla_utils_obj.run_seg(im_bgr, extrinsic,
                                                       pos_vector)
                print(get_2d_point)
                # Decode CARLA's 24-bit depth encoding into meters.
                depth = image_depth[int(get_2d_point[0]), int(get_2d_point[1])]
                scale = depth[0] + depth[1] * 256 + depth[2] * 256 * 256
                scale = scale / ((256 * 256 * 256) - 1)
                depth = scale * 1000
                pos2d = np.array([(WINDOW_WIDTH - get_2d_point[1]) * depth,
                                  (WINDOW_HEIGHT - get_2d_point[0]) * depth,
                                  depth])
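                # The next lines invert the camera projection: scale pixel
                # coordinates by depth, undo the intrinsic with K^-1, then
                # map from camera to world space via the extrinsic
                # (camera-to-world) matrix.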

                first = np.dot(np.linalg.inv(intrinsic), pos2d)
                first = np.append(first, 1)
                sec = np.matmul(extrinsic, first)
                print("present:", pos_vector)
                print("goal:", sec)
            client.send_control(
                measurements.player_measurements.autopilot_control)
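
Both this example and the previous one recover the 4x4 transform matrix by parsing str(Transform). In the CARLA 0.8.x PythonClient the Transform class appears to store this matrix directly, so the string round-trip can likely be avoided. A sketch under that assumption, with a string-parsing fallback:

    import numpy as np

    def matrix_from_transform(transform):
        """Return the 4x4 homogeneous matrix of a PythonClient Transform.

        Assumes a `.matrix` attribute (as in carla/transform.py of the
        0.8.x PythonClient); falls back to parsing the printed form.
        """
        matrix = getattr(transform, 'matrix', None)
        if matrix is not None:
            return np.array(matrix)
        rows = str(transform)[1:-1].split('\n')
        return np.array([[float(v) for v in row.strip('[] ').split()]
                         for row in rows])
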
Code Example #25
0
File: tmp_main.py Project: xjy8943/candy
    def get_obs(self):

        self._timer.tick()

        measurements, sensor_data = self.client.read_data()

        self.auto_control = measurements.player_measurements.autopilot_control

        self._main_image = sensor_data.get('CameraForHuman', None)

        rgb_image = sensor_data.get('CameraRGB', None)
        depth_image = sensor_data.get('CameraDepth', None)

        rgb_image = image_converter.to_rgb_array(rgb_image)
        rgb_image = (rgb_image.astype(np.float32) - 128) / 128
        depth_image = (
            np.max(image_converter.depth_to_logarithmic_grayscale(depth_image),
                   axis=2,
                   keepdims=True) - 128) / 128
        image = np.concatenate([rgb_image, depth_image], axis=2)

        if self.prev_image is None:
            self.prev_image = image

        now_image = image
        image = np.concatenate([self.prev_image, image], axis=2)

        self.prev_image = now_image

        collision = self.get_collision(measurements)

        control, reward = self._get_keyboard_control(pygame.key.get_pressed())
        reward, _ = self.calculate_reward(measurements, reward, collision)

        # Print measurements every second.
        if self._timer.elapsed_seconds_since_lap() > 1.0:
            if self._city_name is not None:
                # Function to get car position on map.
                map_position = self._map.convert_to_pixel([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z
                ])
                # Function to get orientation of the road car is in.
                lane_orientation = self._map.get_lane_orientation([
                    measurements.player_measurements.transform.location.x,
                    measurements.player_measurements.transform.location.y,
                    measurements.player_measurements.transform.location.z
                ])

                self._print_player_measurements_map(
                    measurements.player_measurements, map_position,
                    lane_orientation, reward)
            else:
                self._print_player_measurements(
                    measurements.player_measurements)

            # Plot position on the map as well.

            self._timer.lap()

        # Set the player position
        if self._city_name is not None:
            self._position = self._map.convert_to_pixel([
                measurements.player_measurements.transform.location.x,
                measurements.player_measurements.transform.location.y,
                measurements.player_measurements.transform.location.z
            ])
            self._agent_positions = measurements.non_player_agents
        self.step += 1
        done = False
        if self.step > STEP_LIMIT:
            done = True
            self.step = 0

        return ((image,
                 [measurements.player_measurements.forward_speed * 3.6 / 100]),
                reward, done)
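
get_obs() stacks normalized RGB (3 channels) and log-depth (1 channel) for the previous and current frames into one 8-channel tensor. A quick shape check, using a hypothetical camera resolution:

    import numpy as np

    h, w = 120, 160  # hypothetical resolution; the original sets it elsewhere
    rgb = np.zeros((h, w, 3), dtype=np.float32)    # (rgb - 128) / 128
    depth = np.zeros((h, w, 1), dtype=np.float32)  # log-grayscale, max over RGB
    frame = np.concatenate([rgb, depth], axis=2)      # (h, w, 4)
    stacked = np.concatenate([frame, frame], axis=2)  # prev + now -> (h, w, 8)
    assert stacked.shape == (h, w, 8)
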
Code Example #26
0
    def _on_render(self):
        measurements, sensor_data = self.client.read_data()

        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        if self._mini_view_image1 is not None:
            array = image_converter.depth_to_logarithmic_grayscale(
                self._mini_view_image1)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (gap_x, mini_image_y))

        if self._mini_view_image2 is not None:
            array = image_converter.labels_to_cityscapes_palette(
                self._mini_view_image2)

            if self._enable_imitation:
                control = measurements.player_measurements.autopilot_control

                # Resize the segmentation view to the model input size, keep
                # a single channel, and add the batch dimension.
                img = cv2.resize(array, (320, 240))
                img = img[:, :, 0].reshape(1, 240, 320, 1)

                # Predict the steering angle from the segmentation image.
                control.steer = float(
                    self.model.predict(img, batch_size=1)) / 10.0
                control.throttle = 0.5
                print("Steer is " + str(control.steer))

                self.client.send_control(control)

            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            self._display.blit(surface,
                               (2 * gap_x + MINI_WINDOW_WIDTH, mini_image_y))

        if self._lidar_measurement is not None:
            lidar_data = np.array(self._lidar_measurement.data[:, :2])
            lidar_data *= 2.0
            lidar_data += 100.0
            lidar_data = np.fabs(lidar_data)
            lidar_data = lidar_data.astype(np.int32)
            lidar_data = np.reshape(lidar_data, (-1, 2))
            #draw lidar
            lidar_img_size = (200, 200, 3)
            lidar_img = np.zeros(lidar_img_size)
            lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
            surface = pygame.surfarray.make_surface(lidar_img)
            self._display.blit(surface, (10, 10))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]

            new_window_width = \
                (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    agent_position = self._map.convert_to_pixel([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z
                    ])

                    w_pos = int(
                        agent_position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, [255, 0, 255, 255],
                                       (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
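
The lidar branch above rasterizes the (x, y) returns into a 200x200 top-down image by scaling, shifting to the image center, and marking hit cells. The same idea as a standalone helper (the name and the bounds clamp are ours):

    import numpy as np

    def lidar_to_topdown(points_xy, size=200, scale=2.0):
        """Rasterize lidar (x, y) returns into a size x size image."""
        img = np.zeros((size, size, 3), dtype=np.uint8)
        pts = points_xy * scale + size / 2.0
        pts = np.fabs(pts).astype(np.int32)
        pts = np.clip(pts, 0, size - 1)  # clamp added for safety
        img[pts[:, 0], pts[:, 1]] = (255, 255, 255)
        return img
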
Code Example #27
0
    def _on_render(self):
        datapoints = []

        if self._main_image is not None and self._depth_image is not None:
            # Convert main image
            image = image_converter.to_rgb_array(self._main_image)

            # Retrieve and draw datapoints
            image, datapoints = self._generate_datapoints(image)

            # Draw lidar
            # Camera coordinate system is left, up, forwards
            if VISUALIZE_LIDAR:
                # Calculation to shift bboxes relative to pitch and roll of player
                rotation = self._measurements.player_measurements.transform.rotation
                pitch, roll, yaw = rotation.pitch, rotation.roll, rotation.yaw
                # Since measurements are in degrees, convert to radians

                pitch = degrees_to_radians(pitch)
                roll = degrees_to_radians(roll)
                yaw = degrees_to_radians(yaw)
                print('pitch: ', pitch)
                print('roll: ', roll)
                print('yaw: ', yaw)

                # Rotation matrix for pitch
                rotP = np.array([[cos(pitch), 0, sin(pitch)], [0, 1, 0],
                                 [-sin(pitch), 0, cos(pitch)]])
                # Rotation matrix for roll
                rotR = np.array([[1, 0, 0], [0, cos(roll), -sin(roll)],
                                 [0, sin(roll), cos(roll)]])

                # combined rotation matrix, must be in order roll, pitch, yaw
                rotRP = np.matmul(rotR, rotP)
                # Take the points from the point cloud and transform to car space
                point_cloud = np.array(
                    self._lidar_to_car_transform.transform_points(
                        self._lidar_measurement.data))
                point_cloud[:, 2] -= LIDAR_HEIGHT_POS
                point_cloud = np.matmul(rotRP, point_cloud.T).T
                # Transform to camera space by the inverse of camera_to_car transform
                point_cloud_cam = self._camera_to_car_transform.inverse(
                ).transform_points(point_cloud)
                point_cloud_cam[:, 1] += LIDAR_HEIGHT_POS
                image = lidar_utils.project_point_cloud(
                    image, point_cloud_cam, self._intrinsic, 1)

            # Display image
            surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))
            if self._map_view is not None:
                self._display_agents(self._map_view)
            pygame.display.flip()

            # Determine whether to save files
            distance_driven = self._distance_since_last_recording()
            #print("Distance driven since last recording: {}".format(distance_driven))
            has_driven_long_enough = distance_driven is None or distance_driven > DISTANCE_SINCE_LAST_RECORDING
            if (self._timer.step + 1) % STEPS_BETWEEN_RECORDINGS == 0:
                if has_driven_long_enough and datapoints:
                    # Avoid doing this twice or unnecessarily often
                    if not VISUALIZE_LIDAR:
                        # Calculation to shift bboxes relative to pitch and roll of player
                        rotation = self._measurements.player_measurements.transform.rotation
                        pitch, roll, yaw = rotation.pitch, rotation.roll, rotation.yaw
                        # Since measurements are in degrees, convert to radians

                        pitch = degrees_to_radians(pitch)
                        roll = degrees_to_radians(roll)
                        yaw = degrees_to_radians(yaw)
                        print('pitch: ', pitch)
                        print('roll: ', roll)
                        print('yaw: ', yaw)

                        # Rotation matrix for pitch
                        rotP = np.array([[cos(pitch), 0,
                                          sin(pitch)], [0, 1, 0],
                                         [-sin(pitch), 0,
                                          cos(pitch)]])
                        # Rotation matrix for roll
                        rotR = np.array([[1, 0, 0], [0,
                                                     cos(roll), -sin(roll)],
                                         [0, sin(roll),
                                          cos(roll)]])

                        # combined rotation matrix, must be in order roll, pitch, yaw
                        rotRP = np.matmul(rotR, rotP)
                        # Take the points from the point cloud and transform to car space
                        point_cloud = np.array(
                            self._lidar_to_car_transform.transform_points(
                                self._lidar_measurement.data))
                        point_cloud[:, 2] -= LIDAR_HEIGHT_POS
                        point_cloud = np.matmul(rotRP, point_cloud.T).T
                    self._update_agent_location()
                    # Save screen, lidar and kitti training labels together with calibration and groundplane files
                    self._save_training_files(datapoints, point_cloud)
                    self.captured_frame_no += 1
                    self._captured_frames_since_restart += 1
                    self._frames_since_last_capture = 0
                else:
                    logging.debug(
                        "Could save datapoint, but agent has not driven {} meters since last recording (Currently {} meters)"
                        .format(DISTANCE_SINCE_LAST_RECORDING,
                                distance_driven))
            else:
                self._frames_since_last_capture += 1
                logging.debug(
                    "Could not save training data - no visible agents of selected classes in scene"
                )
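
The roll/pitch rotation block appears twice in this example (once for visualization, once before saving). Factored into a helper it would look like the sketch below; the function name is ours:

    import numpy as np
    from numpy import cos, sin

    def roll_pitch_rotation(pitch, roll):
        """Combined rotation applied to the point cloud above.

        Angles are in radians (after degrees_to_radians); the matrices
        are composed as rotR @ rotP, matching the original code.
        """
        rot_p = np.array([[cos(pitch), 0, sin(pitch)],
                          [0, 1, 0],
                          [-sin(pitch), 0, cos(pitch)]])
        rot_r = np.array([[1, 0, 0],
                          [0, cos(roll), -sin(roll)],
                          [0, sin(roll), cos(roll)]])
        return np.matmul(rot_r, rot_p)
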
Code Example #28
0
File: q.py Project: xdongsheng/carmisitake
def run_carla_client(args):
    # Here we will run up to 500000 episodes with 300 frames each.
    number_of_episodes = 500000
    frames_per_episode = 300

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:
        print('CarlaClient connected')

        #########################
        for episode in range(0, number_of_episodes):
            # Start a new episode.

            if args.settings_filepath is None:

                # Create a CarlaSettings object. This object is a wrapper around
                # the CarlaSettings.ini file. Here we set the configuration we
                # want for the new episode.
                settings = CarlaSettings()
                settings.set(
                    SynchronousMode=True,
                    SendNonPlayerAgentsInfo=True,
                    NumberOfVehicles=0,
                    NumberOfPedestrians=0,
                    # WeatherId=random.choice([1, 3, 7, 8, 14]),
                    WeatherId=1,
                    QualityLevel=args.quality_level)
                settings.randomize_seeds()

                # Now we want to add a couple of cameras to the player vehicle.
                # We will collect the images produced by these cameras every
                # frame.

                # The default camera captures RGB images of the scene.
                camera0 = Camera('CameraRGB')
                # Set image resolution in pixels.
                camera0.set_image_size(210, 160)
                # Set its position relative to the car in meters.
                camera0.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera0)

                # Let's add another camera producing ground-truth depth.
                camera1 = Camera('CameraDepth', PostProcessing='Depth')
                camera1.set_image_size(210, 160)
                camera1.set_position(0.30, 0, 1.30)
                settings.add_sensor(camera1)

                if args.lidar:
                    lidar = Lidar('Lidar32')
                    lidar.set_position(0, 0, 2.50)
                    lidar.set_rotation(0, 0, 0)
                    lidar.set(
                        Channels=32,
                        Range=50,
                        PointsPerSecond=100000,
                        RotationFrequency=10,
                        UpperFovLimit=10,
                        LowerFovLimit=-30)
                    settings.add_sensor(lidar)

            else:

                # Alternatively, we can load these settings from a file.
                with open(args.settings_filepath, 'r') as fp:
                    settings = fp.read()

            # Now we load these settings into the server. The server replies
            # with a scene description containing the available start spots for
            # the player. Here we can provide a CarlaSettings object or a
            # CarlaSettings.ini file as string.
            scene = client.load_settings(settings)

            # Choose one player start. Random selection is disabled here;
            # we always start at index 0.
            # number_of_player_starts = len(scene.player_start_spots)
            # player_start = random.randint(0, max(0, number_of_player_starts - 1))

            # Notify the server that we want to start the episode at the
            # chosen start index. This function blocks until the server is
            # ready to start the episode.
            print('Starting new episode at %r...' % scene.map_name)
            client.start_episode(0)

            # Read the initial data produced by the server (used below to
            # size the DQN input).
            measurements, sensor_data = client.read_data()

            # Print some of the measurements.
            image_RGB = to_rgb_array(sensor_data['CameraRGB'])
            image_RGB_real=image_RGB.flatten()
            print(image_RGB_real)
            print(image_RGB_real.shape[0])
            # k is assumed to be initialized to 1 at module level (not shown
            # in this snippet); the DQN is only constructed once.
            if k == 1:
                player_measurements = measurements.player_measurements
                loc = np.array([player_measurements.transform.location.x,
                                player_measurements.transform.location.y])
                RL = DeepQNetwork(n_actions=8,
                                  n_features=image_RGB_real.shape[0],
                                  learning_rate=0.01, e_greedy=0.9,
                                  replace_target_iter=100, memory_size=2000,
                                  e_greedy_increment=0.001)

                total_steps = 0
            else:
                total_steps = 0
                print("RL finished running")
            k = 2
            ep_r = 0  # episode return, accumulated over the frames below
            for frame in range(0, frames_per_episode):
                measurements, sensor_data = client.read_data()
                player_measurements = measurements.player_measurements
                other_lane = 100 * player_measurements.intersection_otherlane
                offroad = 100 * player_measurements.intersection_offroad
                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                image_RGB_real = image_RGB.flatten()
                print_measurements(measurements)
                observation = image_RGB_real
                done = False

                action = RL.choose_action(observation)

                if not args.autopilot:
                    brake1 = 0.0
                    steer1 = 0.0
                    # Actions above index 4 map to brake values, the rest to
                    # steering values (see the `actions` table).
                    if action > 4:
                        brake1 = actions[action]
                    else:
                        steer1 = actions[action]

                    client.send_control(
                        steer=steer1,
                        throttle=0.6,
                        brake=brake1,
                        hand_brake=False,
                        reverse=False)


                else:

                    # Together with the measurements, the server has sent the
                    # control that the in-game autopilot would do this frame. We
                    # can enable autopilot by sending back this control to the
                    # server. We can modify it if wanted, here for instance we
                    # will add some noise to the steer.

                    control = measurements.player_measurements.autopilot_control
                    # control.steer += random.uniform(-0.1, 0.1)
                    client.send_control(control)

                image_RGB = to_rgb_array(sensor_data['CameraRGB'])
                image_RGB_rea2 = image_RGB.flatten()
                observation_ = image_RGB_rea2
                # Reward: penalize lane invasion and off-road driving, plus
                # the Euclidean distance from the point (271.0, 129.5).
                reward = (-other_lane - offroad +
                          np.sqrt(np.square(player_measurements.transform.location.x - 271.0) +
                                  np.square(player_measurements.transform.location.y - 129.5)))
                print(offroad)

                if offroad > 20 or other_lane > 10:
                    print("Off-road/other-lane threshold exceeded; "
                          "ending episode.")
                    done = True

                RL.store_transition(observation, action, reward, observation_)

                ep_r += reward
                if total_steps > 100:
                    RL.learn(do_train=1)

                if done:
                    print('episode:', episode,
                          'ep_r:', round(ep_r, 2),
                          'epsilon:', round(RL.epsilon, 2))
                    break

                observation = observation_
                total_steps += 1

                # Save the images to disk if requested.
                if args.save_images_to_disk:
                    for name, measurement in sensor_data.items():
                        filename = args.out_filename_format.format(episode, name, frame)
                        measurement.save_to_disk(filename)
            continue
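
The steering/brake branch assumes a flat `actions` lookup defined elsewhere in the file, with indices 0-4 holding steering values and 5-7 holding brake values (n_actions=8). A purely hypothetical table consistent with that branch:

    # Hypothetical - the original defines `actions` elsewhere (not shown).
    actions = [-0.5, -0.25, 0.0, 0.25, 0.5,   # indices 0-4: steer
               0.3, 0.6, 1.0]                 # indices 5-7: brake
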
Code Example #29
0
File: manual_control.py Project: L0rdCha0s/carla
    def _on_render(self):
        gap_x = (WINDOW_WIDTH - 2 * MINI_WINDOW_WIDTH) / 3
        mini_image_y = WINDOW_HEIGHT - MINI_WINDOW_HEIGHT - gap_x

        if self._main_image is not None:
            array = image_converter.to_rgb_array(self._main_image)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        if self._mini_view_image1 is not None:
            array = image_converter.depth_to_logarithmic_grayscale(self._mini_view_image1)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (gap_x, mini_image_y))

        if self._mini_view_image2 is not None:
            array = image_converter.labels_to_cityscapes_palette(
                self._mini_view_image2)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            self._display.blit(
                surface, (2 * gap_x + MINI_WINDOW_WIDTH, mini_image_y))

        if self._lidar_measurement is not None:

            lidar_points = np.array(self._lidar_measurement.data['points'][:, :, :2])
            lidar_points /= 50.0
            lidar_points += 100.0
            lidar_points = np.fabs(lidar_points)
            lidar_points = lidar_points.astype(np.int32)
            lidar_points = np.reshape(lidar_points, (-1, 2))

            lidar_points_labels = self._lidar_measurement.data['labels']
            lidar_points_labels = np.reshape(lidar_points_labels, (-1))

            #draw lidar
            lidar_img_size = (200, 200, 3)
            lidar_img = np.zeros(lidar_img_size)

            for class_id, color in image_converter.semantic_segmentation_classes_to_colors.items():
                lidar_img[tuple(lidar_points[lidar_points_labels == class_id].T)] = color

            surface = pygame.surfarray.make_surface(
                lidar_img
            )
            self._display.blit(surface, (10, 10))

        if self._map_view is not None:
            array = self._map_view
            array = array[:, :, :3]
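            # Scale the map overlay so its height matches WINDOW_HEIGHT,
            # then convert pixel positions into that scaled image.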
            new_window_width = \
                (float(WINDOW_HEIGHT) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            w_pos = int(self._position[0] *
                        (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
            h_pos = int(self._position[1] *
                        (new_window_width / float(self._map_shape[1])))

            pygame.draw.circle(surface, [255, 0, 0, 255], (w_pos, h_pos), 6, 0)
            for agent in self._agent_positions:
                if agent.HasField('vehicle'):
                    agent_position = self._map.get_position_on_map([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z])
                    w_pos = int(agent_position[0] *
                                (float(WINDOW_HEIGHT) / float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))
                    pygame.draw.circle(surface, [255, 0, 255, 255],
                                       (w_pos, h_pos), 4, 0)

            self._display.blit(surface, (WINDOW_WIDTH, 0))

        pygame.display.flip()
Code Example #30
0
File: gather_dataset.py Project: MariaPriisalu/spl
def run_carla_client(args):
    # Here we will run 150 episodes with 500 frames each.
    number_of_episodes = 150
    frames_per_episode = 500

    # Server launch commands, for reference:
    # ./CarlaUE4.sh -carla-server -benchmark -fps=17 -windowed
    # carla-server "/usr/local/carla/Unreal/CarlaUE4/CarlaUE4.uproject" /usr/local/carla/Maps/Town03 -benchmark -fps=10 -windowed

    # We assume the CARLA server is already waiting for a client to connect at
    # host:port. To create a connection we can use the `make_carla_client`
    # context manager, it creates a CARLA client object and starts the
    # connection. It will throw an exception if something goes wrong. The
    # context manager makes sure the connection is always cleaned up on exit.
    with make_carla_client(args.host, args.port) as client:

        print('CarlaClient connected')
        global episode_nbr
        print(episode_nbr)
        for episode in range(0,number_of_episodes):
            if episode % 1 == 0:
                output_folder = 'Datasets/carla-sync/train/test_' + str(episode)
                if not os.path.exists(output_folder+"/cameras.p"):
                    # Start a new episode.
                    episode_nbr=episode
                    frame_step = 1  # Save an image every frame
                    pointcloud_step = 50
                    image_size = [800, 600]
                    camera_local_pos = [0.3, 0.0, 1.3]  # [X, Y, Z]
                    camera_local_rotation = [0, 0, 0]  # [pitch(Y), yaw(Z), roll(X)]
                    fov = 70
                    # Create a CarlaSettings object. This object is a wrapper around
                    # the CarlaSettings.ini file. Here we set the configuration we
                    # want for the new episode.
                    settings = CarlaSettings()
                    settings.set(
                        SynchronousMode=True,
                        SendNonPlayerAgentsInfo=True,
                        NumberOfVehicles=50,
                        NumberOfPedestrians=200,
                        WeatherId=random.choice([1, 3, 7, 8, 14]),
                        QualityLevel=args.quality_level)
                    settings.randomize_seeds()

                    # Now we want to add a couple of cameras to the player vehicle.
                    # We will collect the images produced by these cameras every
                    # frame.


                    camera1 = Camera('CameraDepth', PostProcessing='Depth', FOV=fov)
                    camera1.set_image_size(*image_size)
                    camera1.set_position(*camera_local_pos)
                    camera1.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera1)

                    camera2 = Camera('CameraRGB', PostProcessing='SceneFinal', FOV=fov)
                    camera2.set_image_size(*image_size)
                    camera2.set_position(*camera_local_pos)
                    camera2.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera2)

                    camera3 = Camera('CameraSeg', PostProcessing='SemanticSegmentation', FOV=fov)
                    camera3.set_image_size(*image_size)
                    camera3.set_position(*camera_local_pos)
                    camera3.set_rotation(*camera_local_rotation)
                    settings.add_sensor(camera3)



                    # Now we load these settings into the server. The server replies
                    # with a scene description containing the available start spots for
                    # the player. Here we can provide a CarlaSettings object or a
                    # CarlaSettings.ini file as string.
                    scene = client.load_settings(settings)

                    # Choose one player start at random.
                    number_of_player_starts = len(scene.player_start_spots)
                    player_start = episode  # random.randint(0, max(0, number_of_player_starts - 1))

                    output_folder = 'Datasets/carla-sync/train/test_' + str(episode)
                    # Notify the server that we want to start the episode at the
                    # player_start index. This function blocks until the server is ready
                    # to start the episode.
                    print('Starting new episode...')
                    client.start_episode(player_start)


                    cameras_dict = {}
                    pedestrians_dict = {}
                    cars_dict = {}
                    # Compute the camera transform matrix
                    camera_to_car_transform = camera2.get_unreal_transform()
                    # (Intrinsic) (3, 3) K Matrix
                    K = np.identity(3)
                    K[0, 2] = image_size[0] / 2.0
                    K[1, 2] = image_size[1] / 2.0
                    K[0, 0] = K[1, 1] = image_size[0] / (2.0 * np.tan(fov * np.pi / 360.0))
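                    # The focal length in pixels follows from the pinhole camera
                    # model: f = W / (2 * tan(FOV / 2)); with FOV in degrees,
                    # fov * np.pi / 360.0 is simply (fov / 2) converted to radians.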
                    with open(output_folder + '/camera_intrinsics.p', 'wb') as camfile:
                        pickle.dump(K, camfile)  # pickle requires binary mode in Python 3


                    # Iterate every frame in the episode.
                    for frame in range(0, frames_per_episode):

                        # Read the data produced by the server this frame.
                        measurements, sensor_data = client.read_data()

                        # Print some of the measurements.
                        #print_measurements(measurements)
                        if not frame % frame_step:
                            # Save the images to disk every frame_step frames.

                            for name, measurement in sensor_data.items():
                                filename = args.out_filename_format.format(episode, name, frame)
                                print(filename)
                                measurement.save_to_disk(filename)

                            # We can access the encoded data of a given image as numpy
                            # array using its "data" property. For instance, to get the
                            # depth value (normalized) at pixel X, Y
                            #
                            #     depth_array = sensor_data['CameraDepth'].data
                            #     value_at_pixel = depth_array[Y, X]
                            #
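                            # Likewise, the normalized depth can be converted to
                            # meters (the depth far plane in CARLA 0.8 is 1 km,
                            # which is why the code below multiplies by 1000):
                            #
                            #     depth_in_meters = sensor_data['CameraDepth'].data * 1000.0
                            #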

                            # The instructions to control the vehicle are sent at the
                            # end of the loop body below; in synchronous mode the server
                            # pauses the simulation until that control arrives.

                            # RGB image [[[r,g,b],..[r,g,b]],..[[r,g,b],..[r,g,b]]]
                            image_RGB = to_rgb_array(sensor_data['CameraRGB'])

                            labels = labels_to_array(sensor_data['CameraSeg'])[:, :, np.newaxis]

                            image_seg = np.tile(labels, (1, 1, 3))
                            depth_array = sensor_data['CameraDepth'].data * 1000  # normalized depth -> meters


                            # 2D to (camera) local 3D.
                            # We use image_RGB to colorize each 3D point; this is optional.
                            # "max_depth" keeps only the points that are near the camera,
                            # where 1.0 corresponds to the farthest points (sky).
                            if not frame % pointcloud_step:
                                point_cloud = depth_to_local_point_cloud(
                                    sensor_data['CameraDepth'],
                                    image_RGB,
                                    max_depth=args.far
                                )

                                point_cloud_seg = depth_to_local_point_cloud(
                                    sensor_data['CameraDepth'],
                                    segmentation=image_seg,
                                    max_depth=args.far
                                )

                            # (Camera) local 3d to world 3d.
                            # Get the transform from the player protobuf transformation.
                            world_transform = Transform(
                                measurements.player_measurements.transform
                            )

                            # Compute the final transformation matrix.
                            car_to_world_transform = world_transform * camera_to_car_transform

                            # Car-to-world transformation of the 3D points, given the
                            # transformation matrix. Only transform the clouds on the
                            # frames where new ones were computed above.
                            if not frame % pointcloud_step:
                                point_cloud.apply_transform(car_to_world_transform)
                                point_cloud_seg.apply_transform(car_to_world_transform)

                            Rt = car_to_world_transform.matrix
                            Rt_inv = car_to_world_transform.inverse().matrix
                            cameras_dict[frame] = {}
                            cameras_dict[frame]['inverse_rotation'] = Rt_inv[:]
                            cameras_dict[frame]['rotation'] = Rt[:]
                            cameras_dict[frame]['translation'] = Rt_inv[0:3, 3]
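                            # Note: 'rotation' stores the full 4x4 camera-to-world
                            # matrix, 'inverse_rotation' its world-to-camera inverse,
                            # and 'translation' the world origin expressed in camera
                            # coordinates (taken from the inverse matrix).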
                            cameras_dict[frame]['camera_to_car'] = camera_to_car_transform.matrix

                            # Get non-player info
                            vehicles = {}
                            pedestrians = {}
                            for agent in measurements.non_player_agents:
                                # check if the agent is a vehicle.
                                if agent.HasField('vehicle'):
                                    pos = agent.vehicle.transform.location
                                    pos_vector = np.array([[pos.x], [pos.y], [pos.z], [1.0]])

                                    transformed_3d_pos = np.dot(Rt_inv, pos_vector)
                                    pos2d = np.dot(K, transformed_3d_pos[:3])

                                    # Normalize the point
                                    norm_pos2d = np.array([
                                        pos2d[0] / pos2d[2],
                                        pos2d[1] / pos2d[2],
                                        pos2d[2]])

                                    # norm_pos2d now contains the [x, y, d] pixel coordinates of
                                    # the agent (where d is the depth). A positive depth means
                                    # the point is in front of the camera.

                                    x_2d = image_size[0] - norm_pos2d[0]
                                    y_2d = image_size[1] - norm_pos2d[1]
                                    vehicles[agent.id] = {}
                                    vehicles[agent.id]['transform'] = [agent.vehicle.transform.location.x,
                                                                       agent.vehicle.transform.location.y,
                                                                       agent.vehicle.transform.location.z]
                                    vehicles[agent.id]['bounding_box.transform'] = \
                                        agent.vehicle.bounding_box.transform.location.z

                                    vehicles[agent.id]['yaw'] = agent.vehicle.transform.rotation.yaw
                                    vehicles[agent.id]['bounding_box'] = [agent.vehicle.bounding_box.extent.x,
                                                                          agent.vehicle.bounding_box.extent.y,
                                                                          agent.vehicle.bounding_box.extent.z]
                                    vehicle_transform = Transform(agent.vehicle.bounding_box.transform)
                                    pos = agent.vehicle.transform.location

                                    bbox3d = agent.vehicle.bounding_box.extent

                                    # Compute the 3D bounding box corners: the first four rows
                                    # of f are the bottom face, the last four the top face.
                                    f = np.array([[pos.x + bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z - bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x + bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y + bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z],
                                                  [pos.x - bbox3d.x, pos.y - bbox3d.y,
                                                   pos.z + bbox3d.z + agent.vehicle.bounding_box.transform.location.z]])

                                    f_rotated = vehicle_transform.transform_points(f)
                                    f_2D_rotated = []
                                    vehicles[agent.id]['bounding_box_coord'] = f_rotated

                                    for i in range(f.shape[0]):
                                        point = np.array([[f_rotated[i, 0]], [f_rotated[i, 1]], [f_rotated[i, 2]], [1]])
                                        transformed_2d_pos = np.dot(Rt_inv, point)
                                        pos2d = np.dot(K, transformed_2d_pos[:3])
                                        norm_pos2d = np.array([
                                            pos2d[0] / pos2d[2],
                                            pos2d[1] / pos2d[2],
                                            pos2d[2]])
                                        # print([image_size[0] - (pos2d[0] / pos2d[2]), image_size[1] - (pos2d[1] / pos2d[2])])
                                        f_2D_rotated.append(
                                            np.array([image_size[0] - norm_pos2d[0], image_size[1] - norm_pos2d[1]]))
                                    vehicles[agent.id]['bounding_box_2D'] = f_2D_rotated


                                elif agent.HasField('pedestrian'):
                                    pedestrians[agent.id] = {}
                                    pedestrians[agent.id]['transform'] = [agent.pedestrian.transform.location.x,
                                                                          agent.pedestrian.transform.location.y,
                                                                          agent.pedestrian.transform.location.z]
                                    pedestrians[agent.id]['yaw'] = agent.pedestrian.transform.rotation.yaw
                                    pedestrians[agent.id]['bounding_box'] = [agent.pedestrian.bounding_box.extent.x,
                                                                             agent.pedestrian.bounding_box.extent.y,
                                                                             agent.pedestrian.bounding_box.extent.z]
                                    # get the needed transformations
                                    # remember to explicitly make it Transform() so you can use transform_points()
                                    pedestrian_transform = Transform(agent.pedestrian.transform)
                                    bbox_transform = Transform(agent.pedestrian.bounding_box.transform)

                                    # get the box extent
                                    ext = agent.pedestrian.bounding_box.extent
                                    # 8 bounding box vertices relative to (0,0,0)
                                    bbox = np.array([
                                        [  ext.x,   ext.y,   ext.z],
                                        [- ext.x,   ext.y,   ext.z],
                                        [  ext.x, - ext.y,   ext.z],
                                        [- ext.x, - ext.y,   ext.z],
                                        [  ext.x,   ext.y, - ext.z],
                                        [- ext.x,   ext.y, - ext.z],
                                        [  ext.x, - ext.y, - ext.z],
                                        [- ext.x, - ext.y, - ext.z]
                                    ])

                                    # transform the vertices with respect to the bounding box transform
                                    bbox = bbox_transform.transform_points(bbox)

                                    # the bounding box transform is relative to the pedestrian transform,
                                    # so transform the points relative to it as well
                                    bbox = pedestrian_transform.transform_points(bbox)

                                    # pedestrian's transform is relative to the world, so now,
                                    # bbox contains the 3D bounding box vertices relative to the world
                                    pedestrians[agent.id]['bounding_box_coord'] = copy.deepcopy(bbox)

                                    # Additionally, you can print these vertices to check that it is working
                                    f_2D_rotated = []
                                    ys = []
                                    xs = []
                                    zs = []
                                    for vertex in bbox:
                                        pos_vector = np.array([
                                            [vertex[0,0]],  # [[X,
                                            [vertex[0,1]],  #   Y,
                                            [vertex[0,2]],  #   Z,
                                            [1.0]           #   1.0]]
                                        ])

                                        # transform the point to camera coordinates
                                        transformed_3d_pos = np.dot(Rt_inv, pos_vector)
                                        zs.append(transformed_3d_pos[2])
                                        # project the point to 2D
                                        pos2d = np.dot(K, transformed_3d_pos[:3])

                                        # normalize the 2D points
                                        pos2d = np.array([
                                            pos2d[0] / pos2d[2],
                                            pos2d[1] / pos2d[2],
                                            pos2d[2]
                                        ])

                                        # keep only the points that fall in front of the camera
                                        if pos2d[2] > 0:  # the point is in front of the camera
                                            x_2d = image_size[0] - pos2d[0]
                                            y_2d = image_size[1] - pos2d[1]
                                            ys.append(y_2d)
                                            xs.append(x_2d)
                                            f_2D_rotated.append((y_2d, x_2d))
                                    if len(xs) > 1:
                                        bbox = [[int(min(xs)), int(max(xs))], [int(min(ys)), int(max(ys))]]
                                        clipped_seg = labels[bbox[1][0]:bbox[1][1], bbox[0][0]:bbox[0][1]]
                                        recounted = Counter(clipped_seg.flatten())


                                        # Visibility heuristic: label 4 is "Pedestrian" in the CARLA
                                        # semantic segmentation. Keep the 2D box only if pedestrian
                                        # pixels cover more than 10% of it and their mean depth is
                                        # consistent with the projected vertex depths.
                                        if 4 in recounted.keys() and recounted[4] > 0.1 * len(clipped_seg.flatten()):
                                            clipped_depth = depth_array[bbox[1][0]:bbox[1][1], bbox[0][0]:bbox[0][1]]
                                            people_indx = np.where(clipped_seg == 4)
                                            masked_depth = []
                                            for people in zip(people_indx[0], people_indx[1]):
                                                masked_depth.append(clipped_depth[people])
                                            avg_depth = np.mean(masked_depth)
                                            if avg_depth < 700 and min(zs) - 10 <= avg_depth <= max(zs) + 10:
                                                pedestrians[agent.id]['bounding_box_2D'] = f_2D_rotated
                                                pedestrians[agent.id]['bounding_box_2D_size'] = recounted[4]
                                                pedestrians[agent.id]['bounding_box_2D_avg_depth'] = avg_depth
                                                pedestrians[agent.id]['bounding_box_2D_depths'] = zs


                            cars_dict[frame] = vehicles

                            pedestrians_dict[frame] = pedestrians
                            #print("End of Episode")
                            #print(len(pedestrians_dict[frame]))

                            # Save PLY to disk
                            # This generates the PLY string with the 3D points and the RGB colors
                            # for each row of the file.
                            if not frame % pointcloud_step:
                                point_cloud.save_to_disk(os.path.join(
                                    output_folder, '{:0>5}.ply'.format(frame))
                                )
                                point_cloud_seg.save_to_disk(os.path.join(
                                    output_folder, '{:0>5}_seg.ply'.format(frame))
                                )

                        if not args.autopilot:

                            # Without the autopilot we simply keep the car stopped.
                            client.send_control(hand_brake=True)

                        else:

                            # Together with the measurements, the server has sent the
                            # control that the in-game autopilot would do this frame. We
                            # can enable autopilot by sending back this control to the
                            # server. We can modify it if wanted, here for instance we
                            # will add some noise to the steer.

                            control = measurements.player_measurements.autopilot_control
                            control.steer += random.uniform(-0.1, 0.1)
                            client.send_control(control)
                    print ("Start pickle save")
                    with open(output_folder + '/cameras.p', 'w') as camerafile:
                        pickle.dump(cameras_dict, camerafile)
                    with open(output_folder + '/people.p', 'w') as peoplefile:
                        pickle.dump(pedestrians_dict, peoplefile)
                    with open(output_folder + '/cars.p', 'w') as carfile:
                        pickle.dump(cars_dict, carfile)
                    print ("Episode done")
Code example #31
File: carla_game.py  Project: zxbnjust/data-collector
    def render(self, camera_to_render, objects_to_render):
        """
        Main rendering function. Render a main camera and a map containing several objects
        Args:
            camera_to_render: The sensor data, images you want to render on the window
            objects_to_render: A dictionary Several objects to be rendered
                player_position: The current player position
                waypoints: The waypoints , next positions. If you want to plot then.
                agents_positions: All agent positions ( vehicles)

        Returns:
            None
        """

        if camera_to_render is not None:
            array = image_converter.to_rgb_array(camera_to_render)
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
            self._display.blit(surface, (0, 0))

        # Only if the map view is enabled do we plot the positions and waypoints
        if self._map_view is not None:

            player_position = self._map.convert_to_pixel([
                objects_to_render['player_transform'].location.x,
                objects_to_render['player_transform'].location.y,
                objects_to_render['player_transform'].location.z
            ])
            player_orientation = objects_to_render[
                'player_transform'].orientation

            array = self._map_view
            array = array[:, :, :3]

            new_window_width = \
                (float(self._window_height) / float(self._map_shape[0])) * \
                float(self._map_shape[1])
            surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))

            # Draw the two sensor field-of-view (FOV) cones
            if objects_to_render['fov_list'] is not None:
                fov_1 = objects_to_render['fov_list'][0]
                fov_2 = objects_to_render['fov_list'][1]

                self._draw_fov(surface,
                               player_position,
                               vector_to_degrees([
                                   player_orientation.x, player_orientation.y
                               ]),
                               radius=fov_1[0] / (0.1643 * 2),
                               angle=fov_1[1],
                               color=[255, 128, 0, 255])
                self._draw_fov(surface,
                               player_position,
                               vector_to_degrees([
                                   player_orientation.x, player_orientation.y
                               ]),
                               radius=fov_2[0] / (0.1643 * 2),
                               angle=fov_2[1],
                               color=[128, 64, 0, 255])

            if objects_to_render['waypoints'] is not None:
                self._draw_waypoints(surface, objects_to_render['waypoints'])
            if objects_to_render['route'] is not None:
                self._draw_route(surface, objects_to_render['route'])
            self._draw_goal_position(surface)

            # Draw the player positions
            w_pos = int(
                player_position[0] *
                (float(self._window_height) / float(self._map_shape[0])))
            h_pos = int(player_position[1] *
                        (new_window_width / float(self._map_shape[1])))
            pygame.draw.circle(surface, [0, 0, 0, 255], (w_pos, h_pos), 5, 0)

            for agent in objects_to_render['agents']:
                if agent.HasField('pedestrian'
                                  ) and objects_to_render['draw_pedestrians']:
                    if agent.id in objects_to_render['active_agents_ids']:
                        color = [255, 0, 255, 255]
                    else:
                        color = [0, 0, 255, 255]

                    agent_position = self._map.convert_to_pixel([
                        agent.pedestrian.transform.location.x,
                        agent.pedestrian.transform.location.y,
                        agent.pedestrian.transform.location.z
                    ])

                    w_pos = int(agent_position[0] *
                                (float(self._window_height) /
                                 float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, color, (w_pos, h_pos), 2, 0)

                if agent.HasField('traffic_light') and objects_to_render[
                        'draw_traffic_lights']:
                    if agent.id in objects_to_render['active_agents_ids']:
                        color = [255, 0, 0, 255]
                    else:
                        color = [0, 255, 0, 255]

                    agent_position = self._map.convert_to_pixel([
                        agent.traffic_light.transform.location.x,
                        agent.traffic_light.transform.location.y,
                        agent.traffic_light.transform.location.z
                    ])

                    w_pos = int(agent_position[0] *
                                (float(self._window_height) /
                                 float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, color, (w_pos, h_pos), 3, 0)
                if agent.HasField(
                        'vehicle') and objects_to_render['draw_vehicles']:
                    if agent.id in objects_to_render['active_agents_ids']:
                        color = [255, 0, 255, 255]
                    else:
                        color = [0, 0, 255, 255]

                    agent_position = self._map.convert_to_pixel([
                        agent.vehicle.transform.location.x,
                        agent.vehicle.transform.location.y,
                        agent.vehicle.transform.location.z
                    ])

                    w_pos = int(agent_position[0] *
                                (float(self._window_height) /
                                 float(self._map_shape[0])))
                    h_pos = int(agent_position[1] *
                                (new_window_width / float(self._map_shape[1])))

                    pygame.draw.circle(surface, color, (w_pos, h_pos), 3, 0)

            self._display.blit(surface, (self._window_width, 0))

        self._render_iter += 1
        pygame.display.flip()
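
For context, a sketch of the dictionary shape this render() expects, inferred from the keys accessed above. The values here are placeholders; in practice they come from the surrounding game loop, and game, player_transform, measurements and sensor_data are assumed to exist there.

objects_to_render = {
    'player_transform': player_transform,      # ego vehicle Transform
    'waypoints': None,                         # or a list of waypoints to draw
    'route': None,                             # or the planned route to draw
    'agents': measurements.non_player_agents,  # protobuf list of non-player agents
    'active_agents_ids': set(),                # ids drawn in the highlight color
    'fov_list': None,                          # or [(range_1, angle_1), (range_2, angle_2)]
    'draw_pedestrians': True,
    'draw_traffic_lights': True,
    'draw_vehicles': True,
}
game.render(sensor_data['CameraRGB'], objects_to_render)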