import cv2
import os
import sys

# Assumed import path: in the deeptracking project the Dataset class lives
# under deeptracking.data.dataset; adjust this to your project layout.
from deeptracking.data.dataset import Dataset

if __name__ == '__main__':
    folder = "/home/mathieu/Dataset/DeepTrack/dragon/"
    dataset_path = os.path.join(folder, "train_raw_real")
    new_dataset_path = os.path.join(folder, "train_raw_real_resized")
    if not os.path.exists(new_dataset_path):
        os.mkdir(new_dataset_path)

    dataset = Dataset(dataset_path)
    if not dataset.load():
        print("[Error]: Train dataset empty")
        sys.exit(-1)

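    # Copy the source camera and rescale it by a ratio of 2; frames are
    # later resized to this new camera resolution.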
    new_dataset = Dataset(new_dataset_path)
    new_dataset.camera = dataset.camera.copy()
    new_dataset.camera.set_ratio(2)
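    # Resize every RGB-D frame to the rescaled camera dimensions; the pose is
    # unchanged since resizing does not move the object relative to the camera.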
    for i in range(dataset.size()):
        rgb, depth, pose = dataset.load_image(i)
        new_rgb = cv2.resize(
            rgb, (new_dataset.camera.width, new_dataset.camera.height))
        new_depth = cv2.resize(
            depth, (new_dataset.camera.width, new_dataset.camera.height))
        new_dataset.add_pose(new_rgb, new_depth, pose)
        # Report progress roughly every 1% (integer division avoids the float
        # modulus of the original code, which almost never equals zero)
        if i % max(1, dataset.size() // 100) == 0:
            print("Progress : {}%".format(i * 100 // dataset.size()))
    new_dataset.set_save_type(dataset.metadata["save_type"])
    new_dataset.dump_images_on_disk()
    new_dataset.save_json_files(dataset.metadata)
Example No. 2
                                       OBJECT_WIDTH,
                                       scale=(1000, -1000, -1000))
            rgbA, depthA = normalize_scale(rgbA, depthA, bb,
                                           real_dataset.camera, IMAGE_SIZE)
            rgbB, depthB = normalize_scale(rotated_rgb, rotated_depth, bb,
                                           real_dataset.camera, IMAGE_SIZE)

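            # Store the reference view (A) under the previous pose and link the
            # perturbed view (B) to it through the random transform between them.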
            index = output_dataset.add_pose(rgbA, depthA, previous_pose)
            output_dataset.add_pair(rgbB, depthB, random_transform, index)
            iteration = i * SAMPLE_QUANTITY + j
            sys.stdout.write(
                "Progress: %d%%   \r" %
                (int(iteration /
                     (SAMPLE_QUANTITY * real_dataset.size()) * 100)))
            sys.stdout.flush()

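            # Flush buffered images every 500 pairs and metadata every 5000
            # pairs to limit how much data is kept in memory during generation.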
            if iteration % 500 == 0:
                output_dataset.dump_images_on_disk()
            if iteration % 5000 == 0:
                output_dataset.save_json_files(metadata)

            if args.verbose:
                show_frames(rgbA, depthA, rgbB, depthB)
            cv2.imshow("testB", rgbB[:, :, ::-1])
            k = cv2.waitKey(1)
            if k == ESCAPE_KEY:
                break

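    # Write any remaining buffered images and the final metadata to disk.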
    output_dataset.dump_images_on_disk()
    output_dataset.save_json_files(metadata)
Example No. 3
                detection_offset.rotate(z=math.radians(1))
            elif key == NUM_PAD_8_KEY:
                detection_offset.translate(z=-0.001)
            elif key == NUM_PAD_9_KEY:
                detection_offset.rotate(x=math.radians(1))
            elif key == ARROW_UP_KEY:
                detection_offset.translate(y=-0.001)
            elif key == ARROW_DOWN_KEY:
                detection_offset.translate(y=0.001)
            elif key == ARROW_LEFT_KEY:
                detection_offset.rotate(y=math.radians(-1))
            elif key == ARROW_RIGHT_KEY:
                detection_offset.rotate(y=math.radians(1))
    print("Compute detections")
    for i in range(dataset.size()):
        frame, pose = dataset.data_pose[i]
        # if pose is identity, compute the detection
        if pose == Transform():
            rgb, depth = dataset.data_pose[i][0].get_rgb_depth(dataset.path)
            pose = detector.detect(rgb)
            if detector.get_likelihood() < 0.1:
                print(
                    "[WARNING] : Detector returns uncertain pose at frame {}".
                    format(i))
            # TODO: need a better way to handle the viewpoint's pose change in the dataset...
            dataset.data_pose[i] = (Frame(rgb, depth, str(i)), pose)
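    # Save the manually tuned detection offset next to the dataset so it can be
    # reused later, then persist the updated frames and metadata.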
    np.save(os.path.join(dataset.path, "offset"), detection_offset.matrix)
    dataset.dump_images_on_disk()
    dataset.save_json_files({"save_type": "png"})
    sensor.stop()