Example #1
    # --- Fragment: dataset-generation setup (enclosing scope not visible) ---
    # NOTE(review): `data`, OUTPUT_PATH, REAL_PATH, MODELS, SHADER_PATH,
    # `Dataset`, `Camera`, `InitOpenGL` and `ModelRenderer` are all defined
    # outside this excerpt — presumably `data` is a parsed config dict;
    # confirm against the full script.
    SAMPLE_QUANTITY = int(data["sample_quantity"])
    TRANSLATION_RANGE = float(data["translation_range"])
    # Rotation range is given in degrees in the config; stored here in radians.
    ROTATION_RANGE = math.radians(float(data["rotation_range"]))
    SPHERE_MIN_RADIUS = float(data["sphere_min_radius"])
    SPHERE_MAX_RADIUS = float(data["sphere_max_radius"])
    # Square images: one config value used for both width and height.
    IMAGE_SIZE = (int(data["image_size"]), int(data["image_size"]))
    # Config stores booleans as strings; anything other than "True" is False.
    PRELOAD = data["preload"] == "True"
    SATURATION_THRESHOLD = int(data["saturation_threshold"])

    # Ensure the output directory exists (single level: mkdir, not makedirs).
    if not os.path.exists(OUTPUT_PATH):
        os.mkdir(OUTPUT_PATH)

    # Load the real dataset and attach its camera intrinsics.
    real_dataset = Dataset(REAL_PATH)
    real_dataset.load()
    camera = Camera.load_from_json(real_dataset.path)
    real_dataset.camera = camera
    # The output dataset reuses the same camera as the real one.
    output_dataset = Dataset(OUTPUT_PATH, frame_class=data["save_type"])
    output_dataset.camera = camera
    window_size = (real_dataset.camera.width, real_dataset.camera.height)
    window = InitOpenGL(*window_size)

    # Renderer is set up for the first model only.
    model = MODELS[0]
    vpRender = ModelRenderer(model["model_path"], SHADER_PATH,
                             real_dataset.camera, window, window_size)
    vpRender.load_ambiant_occlusion_map(model["ambiant_occlusion_model"])
    OBJECT_WIDTH = int(model["object_width"])

    # Record the generation parameters so they travel with the dataset.
    metadata = {}
    metadata["translation_range"] = str(TRANSLATION_RANGE)
    metadata["rotation_range"] = str(ROTATION_RANGE)
    metadata["image_size"] = str(IMAGE_SIZE[0])
        # NOTE(review): orphan line — its enclosing `for dataset in datasets:`
        # loop is not visible in this excerpt.
        dataset.load()

    # metadata sanity check: every input dataset must carry identical
    # metadata, otherwise merging them would silently mix incompatible
    # samples. Compares every pair (O(n^2), fine for a handful of datasets).
    for dataset_check in datasets:
        for other_dataset in datasets:
            if dataset_check.metadata != other_dataset.metadata:
                print(dataset_check.metadata)
                raise RuntimeError(
                    "Dataset {} have different metadata than {}\n{}\n{}".
                    format(dataset_check.path, other_dataset.path,
                           dataset_check.metadata, other_dataset.metadata))

    # All datasets agree, so metadata/camera can be taken from the first one.
    metadata = datasets[0].metadata
    camera = datasets[0].camera
    output_dataset = Dataset(output_path, frame_class=metadata["save_type"])
    output_dataset.camera = camera
    output_dataset.metadata = metadata

    # transfer data: copy each frame and its first pair (index 0) into the
    # merged output dataset, flushing buffered images to disk every 500 frames.
    for dataset in datasets:
        print("Process dataset {}".format(dataset.path))
        for i in tqdm(range(dataset.size())):
            rgbA, depthA, initial_pose = dataset.load_image(i)
            rgbB, depthB, transformed_pose = dataset.load_pair(i, 0)

            index = output_dataset.add_pose(rgbA, depthA, initial_pose)
            output_dataset.add_pair(rgbB, depthB, transformed_pose, index)

            if i % 500 == 0:
                output_dataset.dump_images_on_disk()
            # NOTE(review): fragment is truncated here — the body of this
            # conditional is missing from the excerpt.
            if i % 5000 == 0:
import os
import sys

import cv2

if __name__ == '__main__':
    # Downscale every frame of a recorded dataset by a factor of 2 and save
    # the result as a new dataset next to the original.
    folder = "/home/mathieu/Dataset/DeepTrack/dragon/"
    dataset_path = os.path.join(folder, "train_raw_real")
    new_dataset_path = os.path.join(folder, "train_raw_real_resized")
    if not os.path.exists(new_dataset_path):
        os.mkdir(new_dataset_path)

    dataset = Dataset(dataset_path)
    if not dataset.load():
        print("[Error]: Train dataset empty")
        sys.exit(-1)

    # The resized dataset shares the source intrinsics, scaled by a ratio of 2.
    new_dataset = Dataset(new_dataset_path)
    new_dataset.camera = dataset.camera.copy()
    new_dataset.camera.set_ratio(2)

    total = dataset.size()
    # Print progress roughly every 1% of frames. The step is computed once,
    # with integer division and a floor of 1: the original float expression
    # `i % (1 * size / 100) == 0` printed on every iteration for datasets
    # smaller than 100 frames and rarely matched exactly otherwise.
    progress_step = max(1, total // 100)
    new_size = (new_dataset.camera.width, new_dataset.camera.height)
    for i in range(total):
        rgb, depth, pose = dataset.load_image(i)
        new_rgb = cv2.resize(rgb, new_size)
        # NOTE(review): default bilinear resize on the depth map interpolates
        # across depth discontinuities — confirm this is acceptable.
        new_depth = cv2.resize(depth, new_size)
        new_dataset.add_pose(new_rgb, new_depth, pose)
        if i % progress_step == 0:
            print("Progress : {}%".format(i * 100 // total))
    new_dataset.set_save_type(dataset.metadata["save_type"])
    new_dataset.dump_images_on_disk()
    new_dataset.save_json_files(dataset.metadata)
    # --- Fragment: live-recording setup (enclosing scope not visible) ---
    # NOTE(review): `data`, MODELS, SHADER_PATH, `Kinect2`, `ArucoDetector`,
    # `ModelRenderer`, `InitOpenGL`, `Transform` and the `trackbar` callback
    # are defined outside this excerpt.
    OUTPUT_PATH = data["output_path"]
    # Square images: one config value used for both width and height.
    IMAGE_SIZE = (int(data["image_size"]), int(data["image_size"]))
    CAMERA_PATH = data["camera_path"]
    DETECTOR_PATH = data["detector_layout_path"]
    # Config stores booleans as strings; anything other than "True" is False.
    PRELOAD = data["preload"] == "True"
    if not os.path.exists(OUTPUT_PATH):
        os.mkdir(OUTPUT_PATH)

    # Open the Kinect v2, halve the intrinsics resolution, then start streaming.
    sensor = Kinect2(CAMERA_PATH)
    camera = sensor.intrinsics()
    ratio = 2
    camera.set_ratio(ratio)
    sensor.start()

    dataset = Dataset(OUTPUT_PATH)
    dataset.camera = camera
    # OpenGL window + renderer used to overlay the first model on the feed;
    # the aruco detector estimates the board pose from the camera images.
    window = InitOpenGL(camera.width, camera.height)
    detector = ArucoDetector(camera, DETECTOR_PATH)
    vpRender = ModelRenderer(MODELS[0]["model_path"], SHADER_PATH, camera,
                             window, (camera.width, camera.height))
    vpRender.load_ambiant_occlusion_map(MODELS[0]["ambiant_occlusion_model"])

    cv2.namedWindow('image')
    # Trackbar (0-100) controls the render/camera blend; `trackbar` is the
    # OpenCV callback invoked on change.
    cv2.createTrackbar('transparency', 'image', 0, 100, trackbar)

    # todo, read from file?
    detection_offset = Transform()
    # UI state flags, presumably toggled by keyboard handlers not shown here.
    rgbd_record = False
    save_next_rgbd_pose = False
    lock_offset = False
    # NOTE(review): fragment is truncated here — the body of this conditional
    # is missing from the excerpt.
    if PRELOAD: