Example #1
import unittest

import numpy as np

# Project-specific imports; the module paths below are assumptions based on the
# deeptracking-style layout these snippets appear to come from.
from deeptracking.data.dataset import Dataset
from deeptracking.utils.transform import Transform


class TestDatasetMethods(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # run these once as they take time
        cls.dummy_rgb = np.zeros((150, 150, 3), dtype=np.float32)
        cls.dummy_depth = np.zeros((150, 150), dtype=np.float32)
        cls.dummy_pose = Transform()

    def setUp(self):
        self.populated_dataset = Dataset("data")
        for i in range(10):
            self.populated_dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)

    def tearDown(self):
        pass

    def test_it_should_have_size_0_at_init(self):
        dataset = Dataset("data")
        self.assertEqual(dataset.size(), 0)

    def test_it_should_add_sample(self):
        dataset = Dataset("data")
        dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
        self.assertEqual(dataset.size(), 1)

    def test_it_should_return_index_after_adding_pose(self):
        dataset = Dataset("data")
        index0 = dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
        self.assertEqual(index0, 0)
        index1 = dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
        self.assertEqual(index1, 1)

    def test_it_return_0_if_no_pair(self):
        self.assertEqual(self.populated_dataset.pair_size(1), 0)

    def test_it_should_add_pair(self):
        self.populated_dataset.add_pair(self.dummy_rgb, self.dummy_depth, self.dummy_pose, 1)
        self.assertEqual(self.populated_dataset.pair_size(1), 1)
        self.populated_dataset.add_pair(self.dummy_rgb, self.dummy_depth, self.dummy_pose, 1)
        self.assertEqual(self.populated_dataset.pair_size(1), 2)

    def test_it_should_raise_indexerror_if_pose_id_does_not_exists(self):
        self.assertRaises(IndexError, self.populated_dataset.add_pair, self.dummy_rgb, self.dummy_depth,
                          self.dummy_pose, 20)
        self.assertRaises(IndexError, self.populated_dataset.add_pair, self.dummy_rgb, self.dummy_depth,
                          self.dummy_pose, 10)
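
To run this test case on its own, the standard unittest entry point can be appended at the bottom of the test module (a minimal sketch; nothing beyond unittest.main() is assumed here):

if __name__ == '__main__':
    unittest.main()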
Example #2
            previous_pose = rotated_pose.copy()
            previous_pose = combine_view_transform(previous_pose,
                                                   inverted_random_transform)

            rgbA, depthA = vpRender.render(previous_pose.transpose())
            bb = compute_2Dboundingbox(previous_pose,
                                       real_dataset.camera,
                                       OBJECT_WIDTH,
                                       scale=(1000, -1000, -1000))
            rgbA, depthA = normalize_scale(rgbA, depthA, bb,
                                           real_dataset.camera, IMAGE_SIZE)
            rgbB, depthB = normalize_scale(rotated_rgb, rotated_depth, bb,
                                           real_dataset.camera, IMAGE_SIZE)

            index = output_dataset.add_pose(rgbA, depthA, previous_pose)
            output_dataset.add_pair(rgbB, depthB, random_transform, index)
            iteration = i * SAMPLE_QUANTITY + j
            sys.stdout.write(
                "Progress: %d%%   \r" %
                (int(iteration /
                     (SAMPLE_QUANTITY * real_dataset.size()) * 100)))
            sys.stdout.flush()

            if iteration % 500 == 0:
                output_dataset.dump_images_on_disk()
            if iteration % 5000 == 0:
                output_dataset.save_json_files(metadata)

            if args.verbose:
                show_frames(rgbA, depthA, rgbB, depthB)
Example #3
def test_it_should_return_index_after_adding_pose(self):
    dataset = Dataset("data")
    index0 = dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
    self.assertEqual(index0, 0)
    index1 = dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
    self.assertEqual(index1, 1)
Example #4
def test_it_should_add_sample(self):
    dataset = Dataset("data")
    dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
    self.assertEqual(dataset.size(), 1)
Example #5
import os
import sys

import cv2

# Project-specific import; the module path is an assumption based on the
# deeptracking-style layout these snippets appear to come from.
from deeptracking.data.dataset import Dataset

if __name__ == '__main__':
    folder = "/home/mathieu/Dataset/DeepTrack/dragon/"
    dataset_path = os.path.join(folder, "train_raw_real")
    new_dataset_path = os.path.join(folder, "train_raw_real_resized")
    if not os.path.exists(new_dataset_path):
        os.mkdir(new_dataset_path)

    dataset = Dataset(dataset_path)
    if not dataset.load():
        print("[Error]: Train dataset empty")
        sys.exit(-1)

    new_dataset = Dataset(new_dataset_path)
    new_dataset.camera = dataset.camera.copy()
    new_dataset.camera.set_ratio(2)
    for i in range(dataset.size()):
        rgb, depth, pose = dataset.load_image(i)
        new_rgb = cv2.resize(
            rgb, (new_dataset.camera.width, new_dataset.camera.height))
        new_depth = cv2.resize(
            depth, (new_dataset.camera.width, new_dataset.camera.height))
        new_dataset.add_pose(new_rgb, new_depth, pose)
        # Print progress roughly every 1%; integer division avoids a float modulus
        # and max(1, ...) guards against a zero divisor on small datasets.
        if i % max(1, dataset.size() // 100) == 0:
            print("Progress : {}%".format(i * 100 // dataset.size()))
    new_dataset.set_save_type(dataset.metadata["save_type"])
    new_dataset.dump_images_on_disk()
    new_dataset.save_json_files(dataset.metadata)
Example #6
        for other_dataset in datasets:
            if dataset_check.metadata != other_dataset.metadata:
                print(dataset_check.metadata)
                raise RuntimeError(
                    "Dataset {} have different metadata than {}\n{}\n{}".
                    format(dataset_check.path, other_dataset.path,
                           dataset_check.metadata, other_dataset.metadata))

    metadata = datasets[0].metadata
    camera = datasets[0].camera
    output_dataset = Dataset(output_path, frame_class=metadata["save_type"])
    output_dataset.camera = camera
    output_dataset.metadata = metadata

    # transfer data
    for dataset in datasets:
        print("Process dataset {}".format(dataset.path))
        for i in tqdm(range(dataset.size())):
            rgbA, depthA, initial_pose = dataset.load_image(i)
            rgbB, depthB, transformed_pose = dataset.load_pair(i, 0)

            index = output_dataset.add_pose(rgbA, depthA, initial_pose)
            output_dataset.add_pair(rgbB, depthB, transformed_pose, index)

            if i % 500 == 0:
                output_dataset.dump_images_on_disk()
            if i % 5000 == 0:
                output_dataset.save_json_files(metadata)

    output_dataset.dump_images_on_disk()
    output_dataset.save_json_files(metadata)
Example #7
        dataset.load()
        offset_path = os.path.join(dataset.path, "offset.npy")
        if os.path.exists(offset_path):
            detection_offset = Transform.from_matrix(np.load(offset_path))
            lock_offset = True

    while True:
        start_time = time.time()
        bgr, depth = sensor.get_frame()
        bgr = cv2.resize(bgr, (int(1920 / ratio), int(1080 / ratio)))
        depth = cv2.resize(depth, (int(1920 / ratio), int(1080 / ratio)))
        screen = bgr.copy()

        if rgbd_record:
            # Add a dummy pose here; the real pose is computed later as a post-processing step
            dataset.add_pose(bgr, depth, Transform())
        else:
            detection = detector.detect(screen)
            # Draw a colored rectangle around the screen: red = no detection, green = strong detection
            color_ = lerp(detector.get_likelihood(), 1, np.array([255, 0, 0]),
                          np.array([0, 255, 0]))
            cv2.rectangle(screen,
                          (0, 0), (int(1920 / ratio), int(1080 / ratio)),
                          tuple(color_), 10)
            if detection:
                # Add objects offset
                detection.combine(detection_offset.inverse())
                if args.verbose:
                    show_occlusion(detection, screen, depth, camera,
                                   int(MODELS[0]["object_width"]))
                if save_next_rgbd_pose:
Example #8
                (-ROTATION_RANGE, ROTATION_RANGE))
            pair = combine_view_transform(random_pose, random_transform)

            rgbA, depthA = vpRender.render(random_pose.transpose())
            rgbB, depthB = vpRender.render(pair.transpose(),
                                           sphere_sampler.random_direction())
            bb = compute_2Dboundingbox(random_pose,
                                       dataset.camera,
                                       OBJECT_WIDTH,
                                       scale=(1000, -1000, -1000))
            rgbA, depthA = normalize_scale(rgbA, depthA, bb, dataset.camera,
                                           IMAGE_SIZE)
            rgbB, depthB = normalize_scale(rgbB, depthB, bb, dataset.camera,
                                           IMAGE_SIZE)

            index = dataset.add_pose(rgbA, depthA, random_pose)
            dataset.add_pair(rgbB, depthB, random_transform, index)

            if i % 500 == 0:
                dataset.dump_images_on_disk()
            if i % 5000 == 0:
                dataset.save_json_files(metadata)

            if args.verbose:
                show_frames(rgbA, depthA, rgbB, depthB)
            cv2.imshow("testB", rgbB[:, :, ::-1])
            k = cv2.waitKey(1)
            if k == ESCAPE_KEY:
                break
    dataset.dump_images_on_disk()
    dataset.save_json_files(metadata)
Example #9
    files = [
        f for f in os.listdir(dataset_path) if
        os.path.splitext(f)[-1] == ".png" and 'd' not in os.path.splitext(f)[0]
    ]
    detector = ArucoDetector(camera, detector_path)
    window = InitOpenGL(camera.width, camera.height)
    vpRender = ModelRenderer(model_path, shader_path, camera, window,
                             (camera.width, camera.height))
    vpRender.load_ambiant_occlusion_map(model_ao_path)
    ground_truth_pose = None

    for i in range(len(files)):
        img = cv2.imread(os.path.join(dataset.path, "{}.png".format(i)))
        detection = detector.detect(img)
        if detection is not None:
            ground_truth_pose = detection
            ground_truth_pose.combine(offset.inverse(), copy=False)
        else:
            print(
                "[WARN]: frame {} has not been detected... using previous detection"
                .format(i))
        dataset.add_pose(None, None, ground_truth_pose)
        rgb_render, depth_render = vpRender.render(
            ground_truth_pose.transpose())
        bgr_render = rgb_render[:, :, ::-1].copy()
        img = image_blend(bgr_render, img)

        cv2.imshow("view", img)
        cv2.waitKey(1)
    dataset.save_json_files({"save_type": "png"})