class TestDatasetMethods(unittest.TestCase):
    """Unit tests for Dataset pose/pair bookkeeping (size, indices, pairs)."""

    @classmethod
    def setUpClass(cls):
        # Build the shared dummy fixtures once — they take time to create
        # and are never mutated by the tests, so class scope is safe.
        cls.dummy_rgb = np.zeros((150, 150, 3), dtype=np.float32)
        cls.dummy_depth = np.zeros((150, 150), dtype=np.float32)
        cls.dummy_pose = Transform()

    def setUp(self):
        # A fresh dataset pre-filled with 10 poses (indices 0..9) for the
        # pair-related tests below.
        self.populated_dataset = Dataset("data")
        for _ in range(10):
            self.populated_dataset.add_pose(
                self.dummy_rgb, self.dummy_depth, self.dummy_pose)

    def test_it_should_have_size_0_at_init(self):
        """A brand-new dataset contains no samples."""
        dataset = Dataset("data")
        self.assertEqual(dataset.size(), 0)

    def test_it_should_add_sample(self):
        """Adding a pose increments the dataset size."""
        dataset = Dataset("data")
        dataset.add_pose(self.dummy_rgb, self.dummy_depth, self.dummy_pose)
        self.assertEqual(dataset.size(), 1)

    def test_it_should_return_index_after_adding_pose(self):
        """add_pose returns sequential, zero-based indices."""
        dataset = Dataset("data")
        index0 = dataset.add_pose(
            self.dummy_rgb, self.dummy_depth, self.dummy_pose)
        self.assertEqual(index0, 0)
        index1 = dataset.add_pose(
            self.dummy_rgb, self.dummy_depth, self.dummy_pose)
        self.assertEqual(index1, 1)

    def test_it_return_0_if_no_pair(self):
        """pair_size is 0 for a pose that has no pairs yet."""
        self.assertEqual(self.populated_dataset.pair_size(1), 0)

    def test_it_should_add_pair(self):
        """Each add_pair call grows the pair count for that pose."""
        self.populated_dataset.add_pair(
            self.dummy_rgb, self.dummy_depth, self.dummy_pose, 1)
        self.assertEqual(self.populated_dataset.pair_size(1), 1)
        self.populated_dataset.add_pair(
            self.dummy_rgb, self.dummy_depth, self.dummy_pose, 1)
        self.assertEqual(self.populated_dataset.pair_size(1), 2)

    def test_it_should_raise_indexerror_if_pose_id_does_not_exists(self):
        """add_pair rejects pose ids outside 0..size()-1 (10 is one past
        the last valid index of the 10-pose fixture)."""
        self.assertRaises(IndexError, self.populated_dataset.add_pair,
                          self.dummy_rgb, self.dummy_depth, self.dummy_pose, 20)
        self.assertRaises(IndexError, self.populated_dataset.add_pair,
                          self.dummy_rgb, self.dummy_depth, self.dummy_pose, 10)
# Body of the sample-generation loop (indices i, j and most names are bound
# in the enclosing scope, outside this view).
# Build the "previous" camera pose by applying the inverse of the random
# perturbation to the current rotated pose.
previous_pose = rotated_pose.copy()
previous_pose = combine_view_transform(previous_pose, inverted_random_transform)
# Render frame A from the previous pose.
# NOTE(review): transpose() presumably converts to the renderer's matrix
# convention (row- vs column-major) — confirm against vpRender.render.
rgbA, depthA = vpRender.render(previous_pose.transpose())
# 2D bounding box of the object at the previous pose; the (1000, -1000, -1000)
# scale looks like a mm conversion with axis flips — TODO confirm units/signs.
bb = compute_2Dboundingbox(previous_pose, real_dataset.camera, OBJECT_WIDTH,
                           scale=(1000, -1000, -1000))
# Crop/scale both frames to IMAGE_SIZE using the same bounding box so the
# A/B pair stays spatially aligned.
rgbA, depthA = normalize_scale(rgbA, depthA, bb, real_dataset.camera, IMAGE_SIZE)
rgbB, depthB = normalize_scale(rotated_rgb, rotated_depth, bb,
                               real_dataset.camera, IMAGE_SIZE)
# Store frame A with its pose, then attach frame B as a pair keyed by the
# random transform between them.
index = output_dataset.add_pose(rgbA, depthA, previous_pose)
output_dataset.add_pair(rgbB, depthB, random_transform, index)
# Flat progress counter across the (outer sample, inner quantity) loops.
iteration = i * SAMPLE_QUANTITY + j
sys.stdout.write("Progress: %d%% \r" % (
    int(iteration / (SAMPLE_QUANTITY * real_dataset.size()) * 100)))
sys.stdout.flush()
# Periodic flushes: images every 500 iterations, metadata every 5000
# (both also fire at iteration 0).
if iteration % 500 == 0:
    output_dataset.dump_images_on_disk()
if iteration % 5000 == 0:
    output_dataset.save_json_files(metadata)
# Optional visual debugging; [:, :, ::-1] converts RGB -> BGR for OpenCV.
if args.verbose:
    show_frames(rgbA, depthA, rgbB, depthB)
    cv2.imshow("testB", rgbB[:, :, ::-1])
# Merge script fragment: verify all input datasets share the same metadata,
# then copy every pose/pair into a single output dataset.
# (dataset_check is bound outside this view — presumably the reference
# dataset being compared against the others.)
for other_dataset in datasets:
    if dataset_check.metadata != other_dataset.metadata:
        print(dataset_check.metadata)
        raise RuntimeError(
            "Dataset {} have different metadata than {}\n{}\n{}".
            format(dataset_check.path, other_dataset.path,
                   dataset_check.metadata, other_dataset.metadata))
# All datasets agree, so the first one's metadata/camera are representative.
metadata = datasets[0].metadata
camera = datasets[0].camera
output_dataset = Dataset(output_path, frame_class=metadata["save_type"])
output_dataset.camera = camera
output_dataset.metadata = metadata
# transfer data
for dataset in datasets:
    print("Process dataset {}".format(dataset.path))
    for i in tqdm(range(dataset.size())):
        # Copy each pose together with its first pair (pair index 0).
        # NOTE(review): additional pairs per pose, if any, are not copied —
        # confirm each pose holds exactly one pair in these datasets.
        rgbA, depthA, initial_pose = dataset.load_image(i)
        rgbB, depthB, transformed_pose = dataset.load_pair(i, 0)
        index = output_dataset.add_pose(rgbA, depthA, initial_pose)
        output_dataset.add_pair(rgbB, depthB, transformed_pose, index)
        # Periodic flushes to bound memory; both also fire at i == 0.
        if i % 500 == 0:
            output_dataset.dump_images_on_disk()
        if i % 5000 == 0:
            output_dataset.save_json_files(metadata)
# Final flush so the tail of the last dataset is persisted.
output_dataset.dump_images_on_disk()
output_dataset.save_json_files(metadata)