 def test_unsupervised_depth_problem_cpu(self):
     device = "cpu"
     sequence_8 = Downloader('08')
     if not os.path.exists("./dataset/poses"):
         print("Download dataset")
         sequence_8.download_sequence()
     lengths = (200, 30, 30)
     dataset = pykitti.odometry(sequence_8.main_dir,
                                sequence_8.sequence_id,
                                frames=range(0, 260, 1))
     dataset_manager = DatasetManagerMock(dataset,
                                           lengths=lengths,
                                          num_workers=WORKERS_COUNT)
     model = UnDeepVO(max_depth=2., min_depth=1.0).to(device)
     optimizer_manager = OptimizerManager()
     criterion = UnsupervisedCriterion(
         dataset_manager.get_cameras_calibration(device), 0.1, 1, 0.85)
     handler = TrainingProcessHandler(mlflow_tags={"name": "test"})
     problem = UnsupervisedDepthProblem(model,
                                        criterion,
                                         optimizer_manager,
                                        dataset_manager,
                                        handler,
                                        batch_size=1,
                                        device=device)
     problem.train(1)
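These snippets are excerpted from a larger test module, so their imports are not shown. A minimal setup along the following lines is assumed; the project-specific import paths are illustrative guesses rather than the repository's actual package layout, which is why they are left commented out.

# Assumed setup for the test snippets in these examples. Standard-library and
# third-party imports are real; the project-specific import paths below are
# hypothetical placeholders, since the actual package structure is not shown.
import os
import unittest

import pykitti
import torch

# from <project>.data import Downloader, UnsupervisedDatasetManager
# from <project>.models import UnDeepVO
# from <project>.criterion import UnsupervisedCriterion, PoseLoss
# from <project>.problems import (OptimizerManager, TrainingProcessHandler,
#                                 UnsupervisedDepthProblem)

WORKERS_COUNT = 4  # assumed value; the examples only require a positive int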
Example #2
 def test_get_cameras_calibration(self):
     sequence_8 = Downloader('08')
     if not os.path.exists("./dataset/poses"):
         print("Download dataset")
         sequence_8.download_sequence()
     lengths = (1, 1, 1)
     dataset = pykitti.odometry(sequence_8.main_dir,
                                sequence_8.sequence_id,
                                frames=range(0, 3, 1))
     dataset_manager = UnsupervisedDatasetManager(dataset,
                                                   lengths=lengths,
                                                  num_workers=WORKERS_COUNT)
     camera_calibration = dataset_manager.get_cameras_calibration()
     self.assertEqual(camera_calibration.left_camera_matrix.shape,
                      torch.Size([1, 3, 3]))
     self.assertEqual(camera_calibration.right_camera_matrix.shape,
                      torch.Size([1, 3, 3]))
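The [1, 3, 3] shape checked above corresponds to a batch of one pinhole intrinsic matrix per camera. For reference, a sketch of such a tensor is shown below; the focal length and principal point values are placeholders for a 384x128 image, not numbers read from the KITTI calibration files.

# Illustrative pinhole intrinsics K with a leading batch dimension of 1.
# fx, fy, cx, cy are placeholder numbers, not actual KITTI calibration data.
import torch

fx, fy, cx, cy = 200.0, 200.0, 192.0, 64.0
left_camera_matrix = torch.tensor([[[fx, 0.0, cx],
                                    [0.0, fy, cy],
                                    [0.0, 0.0, 1.0]]])
assert left_camera_matrix.shape == torch.Size([1, 3, 3])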
Example #3
 def test_pose_loss(self):
     sequence_8 = Downloader('08')
     if not os.path.exists("./dataset/poses"):
         print("Download dataset")
         sequence_8.download_sequence()
     dataset = pykitti.odometry(sequence_8.main_dir,
                                sequence_8.sequence_id,
                                frames=range(0, 3, 1))
     dataset_manager = UnsupervisedDatasetManager(dataset,
                                                  lengths=(1, 1, 1),
                                                  num_workers=WORKERS_COUNT)
     angles = torch.tensor([[1., 1., 1.]]).cuda()
     translation = torch.tensor([[0.1, 0.2, 0.3]]).cuda()
     pose_loss = PoseLoss(
         1, 1,
         dataset_manager.get_cameras_calibration().
         transform_from_left_to_right)
     out = pose_loss(translation, translation, angles, angles)
     self.assertEqual(out.shape, torch.Size([]))
     self.assertGreaterEqual(out, 0)
     self.assertLessEqual(out, 0.01)
Example #4
 def test_dataset_manager(self):
     sequence_8 = Downloader('08')
     if not os.path.exists("./dataset/poses"):
         print("Download dataset")
         sequence_8.download_sequence()
     lengths = (200, 30, 30)
     dataset = pykitti.odometry(sequence_8.main_dir,
                                sequence_8.sequence_id,
                                frames=range(0, 260, 1))
     dataset_manager = UnsupervisedDatasetManager(dataset,
                                                   lengths=lengths,
                                                  num_workers=WORKERS_COUNT)
     self.assertEqual(len(dataset_manager.get_train_dataset()), lengths[0])
     self.assertEqual(len(dataset_manager.get_test_dataset()), lengths[1])
     self.assertEqual(len(dataset_manager.get_validation_dataset()),
                      lengths[2])
     batches = dataset_manager.get_train_batches(20)
     for batch in batches:
         self.assertEqual(batch["left_current_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["right_current_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["left_next_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["right_next_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["current_position"].shape,
                          torch.Size([20, 3]))
         self.assertEqual(batch["current_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["next_position"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["next_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["delta_position"].shape, torch.Size([20,
                                                                     3]))
         self.assertEqual(batch["delta_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["right_next_image"].dtype, torch.float32)
         break
     batches = dataset_manager.get_validation_batches(20)
     for batch in batches:
         self.assertEqual(batch["left_current_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["right_current_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["left_next_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["right_next_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["current_position"].shape,
                          torch.Size([20, 3]))
         self.assertEqual(batch["current_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["next_position"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["next_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["delta_position"].shape, torch.Size([20,
                                                                     3]))
         self.assertEqual(batch["delta_angle"].shape, torch.Size([20, 3]))
         break
     batches = dataset_manager.get_test_batches(20)
     for batch in batches:
         self.assertEqual(batch["left_current_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["right_current_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["left_next_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["right_next_image"].shape,
                          torch.Size([20, 3, 128, 384]))
         self.assertEqual(batch["current_position"].shape,
                          torch.Size([20, 3]))
         self.assertEqual(batch["current_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["next_position"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["next_angle"].shape, torch.Size([20, 3]))
         self.assertEqual(batch["delta_position"].shape, torch.Size([20,
                                                                     3]))
         self.assertEqual(batch["delta_angle"].shape, torch.Size([20, 3]))
         break
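The same per-key shape assertions are repeated for the train, validation, and test loaders. A helper along the lines of the sketch below (not part of the original test suite) could state the expected batch schema once; the key names and shapes are taken directly from the assertions above.

# Hypothetical helper, not from the repository: checks one batch against the
# schema asserted in test_dataset_manager for a batch size of 20.
import torch

EXPECTED_BATCH_SHAPES = {
    "left_current_image": torch.Size([20, 3, 128, 384]),
    "right_current_image": torch.Size([20, 3, 128, 384]),
    "left_next_image": torch.Size([20, 3, 128, 384]),
    "right_next_image": torch.Size([20, 3, 128, 384]),
    "current_position": torch.Size([20, 3]),
    "current_angle": torch.Size([20, 3]),
    "next_position": torch.Size([20, 3]),
    "next_angle": torch.Size([20, 3]),
    "delta_position": torch.Size([20, 3]),
    "delta_angle": torch.Size([20, 3]),
}


def assert_batch_shapes(test_case, batch):
    for key, expected_shape in EXPECTED_BATCH_SHAPES.items():
        test_case.assertEqual(batch[key].shape, expected_shape)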
Example #5
                    default="cuda:0",
                    type=str,
                    help='device to run on, e.g. "cuda:0" or "cpu"')

parser.add_argument('-model_path',
                    default="",
                    type=str,
                    help='path to a saved model state dict to load; an empty string skips loading')

args = parser.parse_args()

MAIN_DIR = args.main_dir
lengths = args.split
problem = None
if args.method == "unsupervised":
    sequence_8 = Downloader('08')
    if not os.path.exists("./dataset/poses"):
        print("Download dataset")
        sequence_8.download_sequence()
    dataset = pykitti.odometry(MAIN_DIR, '08', frames=range(*args.frames_range))
    dataset_manager = UnsupervisedDatasetManager(dataset, lengths=lengths)

    model = UnDeepVO(args.max_depth, args.min_depth, args.resnet).to(args.device)

    if args.model_path != "":
        model.load_state_dict(torch.load(args.model_path, map_location=args.device))
    criterion = UnsupervisedCriterion(dataset_manager.get_cameras_calibration(args.device),
                                      args.lambda_position,
                                      args.lambda_rotation,
                                      args.lambda_s,
                                      args.lambda_disparity,