    def save_data(self, data, framenum):
        idx = 0
        for v in range(self.num_vehicles):
            vehicle_path = self.vehicle_dir_paths[v]
            
            d = {}
            d["num_camRs"] = self.num_camRs
            d["vehicle_names"] = self.vehicle_names
            d["bounding_box"] = self.vehicle_bbox_list
            d["vehicle_extrinsics"] = self.vehicle_extrinsics
            for j in self.sensor_names:
                sensor_name = carla_utils.get_sensor_name(j)
                d["{}_data".format(sensor_name)] = []
                d["{}_extrinsics".format(sensor_name)] = []
                d["{}_cam_to_car_transform_locs".format(sensor_name)] = []
                d["{}_cam_to_car_transform_rots".format(sensor_name)] = []

            for i in range(self.num_locations_per_vehicle):
                for j in self.sensor_names:
                    sensor_name = carla_utils.get_sensor_name(j)
                    data_instance = data[idx]
                    if "rgb" in j:
                        processed_data = image_converter.to_rgb_array(data_instance)
                    elif "depth" in j:
                        processed_data = image_converter.depth_in_meters(data_instance)
                    elif "semantic" in j:
                        # TODO: handle R channel properly here.
                        processed_data = image_converter.to_rgb_array(data_instance)
                    else:
                        print("Invalid sensor type %s. Quitting." % j)
                        exit(1)

                    # print("The transform for this camera is: ", self.sensors_list[idx].get_transform())
                    # transform = carla.Transform(carla.Location(x=20.5, y=0, z=0))
                    # https://github.com/carla-simulator/carla/issues/857
                    # self.sensors_list[idx].set_transform(transform)
                    # self.world.tick()
                    # print("The new transform for this camera is: ", self.sensors_list[idx].get_transform())
                    # print("The transform for the vehicle is: ", self.vehicles_list[v].get_transform())
                    d["{}_data".format(carla_utils.get_sensor_name(j))].append(processed_data)
                    d["{}_extrinsics".format(carla_utils.get_sensor_name(j))].append(carla_utils.get_extrinsics_for_data(data_instance))

                    d["{}_cam_to_car_transform_locs".format(carla_utils.get_sensor_name(j))].append(self.position_list[idx])
                    d["{}_cam_to_car_transform_rots".format(carla_utils.get_sensor_name(j))].append(self.rotation_list[idx])
                    idx += 1
            
            # Stack each sensor's per-location lists into single numpy arrays.
            for j in self.sensor_names:
                sensor_name = carla_utils.get_sensor_name(j)
                for suffix in ("data", "extrinsics", "cam_to_car_transform_locs", "cam_to_car_transform_rots"):
                    key = "{}_{}".format(sensor_name, suffix)
                    d[key] = np.stack(d[key])

            pickle_fname = "vehicle-{}_frame-{}.p".format(v, framenum)
            print("Saving data in pickle file: %s" % pickle_fname)
            with open(os.path.join(vehicle_path, pickle_fname), 'wb') as f:
                pickle.dump(d, f)
            # NOTE: this break means only the first vehicle's data is saved.
            break
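
A quick way to sanity-check the output is to load one pickle back and inspect the stacked arrays. This is a minimal sketch; the file path and the "rgb" sensor name are assumptions inferred from the key format above.

import os
import pickle

# Hypothetical path to one file written by save_data above.
pickle_path = os.path.join("vehicle_0", "vehicle-0_frame-10.p")
with open(pickle_path, 'rb') as f:
    d = pickle.load(f)

# Each "<sensor>_data" entry is stacked with one slice per camera location.
print(d["rgb_data"].shape)        # e.g. (num_locations, H, W, 3)
print(d["rgb_extrinsics"].shape)  # one extrinsics matrix per location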
Example 2
def main():
    pygame.init()
    display = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT),
                                      pygame.HWSURFACE | pygame.DOUBLEBUF)
    font = get_font()
    clock = pygame.time.Clock()

    with SynchronyModel() as sync_mode:
        try:
            step = 1
            while True:
                if should_quit():
                    break
                clock.tick()
                snapshot, sync_mode.main_image, sync_mode.depth_image, sync_mode.point_cloud = sync_mode.tick(
                    timeout=2.0)

                image = image_converter.to_rgb_array(sync_mode.main_image)
                sync_mode.extrinsic = np.mat(
                    sync_mode.my_camera.get_transform().get_matrix())
                image, datapoints = sync_mode.generate_datapoints(image)

                if datapoints and step % 100 == 0:
                    data = np.copy(
                        np.frombuffer(sync_mode.point_cloud.raw_data,
                                      dtype=np.dtype('f4')))
                    data = np.reshape(data, (int(data.shape[0] / 4), 4))
                    # Isolate the 3D data
                    points = data[:, :-1]
                    # transform to car space
                    # points = np.append(points, np.ones((points.shape[0], 1)), axis=1)
                    # points = np.dot(sync_mode.player.get_transform().get_matrix(), points.T).T
                    # points = points[:, :-1]
                    # points[:, 2] -= LIDAR_HEIGHT_POS
                    sync_mode._save_training_files(datapoints, points)
                    sync_mode.captured_frame_no += 1

                step += 1
                fps = round(1.0 / snapshot.timestamp.delta_seconds)
                draw_image(display, image)
                display.blit(
                    font.render('% 5d FPS (real)' % clock.get_fps(), True,
                                (255, 255, 255)), (8, 10))
                display.blit(
                    font.render('% 5d FPS (simulated)' % fps, True,
                                (255, 255, 255)), (8, 28))
                pygame.display.flip()
        finally:
            print('destroying actors.')
            for actor in sync_mode.actor_list:
                actor.destroy()
            pygame.quit()
            print('done.')
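
The loop above relies on a should_quit() helper that is not shown. A minimal sketch of the usual pygame version, as an assumption mirroring CARLA's synchronous-mode examples:

def should_quit():
    # True when the window is closed or ESC is pressed.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            return True
        elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
            return True
    return False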
Example 3
def apply_agent_control(image, player, agent):
    """ Function to pack up the data into the necessary format for the agent to process """
    # Pack the current image and player state into a Measurements object
    meas = Measurements()
    meas.update_measurements(image, player)
    # Convert the raw image and store the processed data to pass to the agent
    sensor_data = {}
    sensor_data['CameraRGB'] = image_converter.to_rgb_array(image)

    # Process next control step from the agent and execute it
    control = agent.run_step(meas, sensor_data, LANE_FOLLOW, None)

    player.apply_control(control)
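
A hedged sketch of how apply_agent_control might be wired to a camera: the world, player, and agent objects are assumptions, while sensor.listen() is the standard CARLA subscription call.

# Assumed setup: invoke the agent on every camera frame.
camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
camera = world.spawn_actor(
    camera_bp,
    carla.Transform(carla.Location(x=1.5, z=2.4)),
    attach_to=player)
camera.listen(lambda image: apply_agent_control(image, player, agent))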
Example 4
    def semantic_segmentation_callback(self, image_data):
        '''Receives camera data asynchronously, segments it, and saves the result.

        @param image_data: a carla.Image object containing the RGB image.
                           We'll run it through RefineNet and save it here.
        TODO: Confirm label encoding with Lars
        '''
        # Convert the image to an RGB numpy array
        rgb_image = to_rgb_array(image_data)
        # Segment that image
        semantic_data = self.refNet.do_segmentation(rgb_image)

        # Save the semantic data along with its frame number
        self.semantic_data[image_data.frame] = semantic_data
        self.log("Received semantic data for frame: " + str(image_data.frame))
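
A hedged sketch of how this callback might be registered; the RGB blueprint choice (RefineNet does the segmentation, so a plain camera suffices) and the self.world / self.vehicle attributes are assumptions.

        # Assumed setup: subscribe an RGB camera and segment each frame on arrival.
        camera_bp = self.world.get_blueprint_library().find('sensor.camera.rgb')
        self.camera = self.world.spawn_actor(
            camera_bp,
            carla.Transform(carla.Location(x=1.5, z=2.4)),
            attach_to=self.vehicle)
        self.camera.listen(self.semantic_segmentation_callback)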