# Example 1
    def setUp(self) -> None:
        """Prepare a mocked TUM RGB-D data module and a mono unsupervised model."""
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        data_root = os.path.join(
            os.path.dirname(tests_dir), "datasets", "tum_rgbd",
            "rgbd_dataset_freiburg3_large_cabinet_validation")
        factory = TumVideoDataModuleFactory(data_root)
        module = factory.make_data_module(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1),
            device="cuda:0")
        # Wrap the real module in a mock so the test stays lightweight.
        self._data_module = DataModuleMock(module)

        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = MonoUnsupervisedCriterion(
            self._data_module.get_cameras_calibration(), 1, 1)

        # NOTE(review): beta1=0.99 / beta2=0.9 is the reverse of the usual Adam
        # ordering (0.9, 0.999) — confirm this is intentional.
        hyper_params = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
        self._model = UnsupervisedDepthModel(hyper_params,
                                             pose_net,
                                             depth_net,
                                             criterion,
                                             stereo=False,
                                             mono=True).cuda()
# Example 2
    def test_unfreeze_last_layer(self):
        """After freezing, unfreeze_last_layer must re-enable grads on the heads."""
        hyper = AttributeDict(
            lr=1e-4,
            beta1=0.9,
            beta2=0.99,
            lambda_position=0.01,
            lambda_rotation=0.1,
            batch_size=8,
        )

        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()

        hyper.update(scale_lr=5e-1,
                     initial_log_scale=4.59,
                     initial_log_min_depth=0.)

        model = ScaledUnsupervisedDepthModel(hyper,
                                             pose_net,
                                             depth_net,
                                             criterion=None)

        # Freeze everything, then selectively thaw only the last layers.
        freeze_feature_extractor(model)
        unfreeze_last_layer(model)

        for head in (model._depth_net._last_conv,
                     model._pose_net.rot3,
                     model._pose_net.transl3):
            self.assertTrue(head.requires_grad)
# Example 3
    def setUp(self) -> None:
        """Prepare a mocked KITTI data module and a scaled unsupervised model."""
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        kitti_root = os.path.join(os.path.dirname(tests_dir),
                                  "datasets", "kitti")
        factory = KittiDataModuleFactory(range(0, 301, 1),
                                         directory=kitti_root)
        manager = factory.make_dataset_manager(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1))
        # Wrap the real module in a mock so the test stays lightweight.
        self._data_module = DataModuleMock(manager)

        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = UnsupervisedCriterion(
            self._data_module.get_cameras_calibration(), 1, 1)

        # NOTE(review): beta1=0.99 / beta2=0.9 is the reverse of the usual Adam
        # ordering (0.9, 0.999) — confirm this is intentional.
        hyper = AttributeDict(
            lr=1e-3,
            beta1=0.99,
            beta2=0.9,
            scale_lr=1e-3,
            initial_log_scale=0.,
            initial_log_min_depth=0.,
            initial_log_pose_scale=0.,
        )
        self._model = ScaledUnsupervisedDepthModel(hyper, pose_net, depth_net,
                                                   criterion).cuda()
# Example 4
    def test_load_undeepvo_checkpoint(self):
        """Loading an UnDeepVO checkpoint must overwrite both networks' weights."""
        filename = 'checkpoint_undeepvo.pth'
        # The checkpoint fixture can be fetched via checkpoint_download.sh:
        # subprocess.run('checkpoint_download.sh')

        hyper = AttributeDict(
            lr=1e-4,
            beta1=0.9,
            beta2=0.99,
            lambda_position=0.01,
            lambda_rotation=0.1,
            batch_size=8,
        )

        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()

        hyper.update(scale_lr=5e-1,
                     initial_log_scale=4.59,
                     initial_log_min_depth=0.)

        model = ScaledUnsupervisedDepthModel(hyper,
                                             pose_net,
                                             depth_net,
                                             criterion=None)

        # Snapshot the freshly-initialized weights, then load the checkpoint
        # and check that at least one weight changed in each sub-network.
        model_before = copy.deepcopy(model)
        load_undeepvo_checkpoint(model, filename)
        pose_changed = torch.any(model_before._pose_net._first_layer.weight !=
                                 model._pose_net._first_layer.weight)
        depth_changed = torch.any(model_before._depth_net.skip_zero.weight !=
                                  model._depth_net.skip_zero.weight)
        self.assertTrue(pose_changed)
        self.assertTrue(depth_changed)
# Example 5
    def setUp(self) -> None:
        """Prepare a mocked KITTI data module, a model with result visualization,
        and TensorBoard / MLflow loggers for the logging tests."""
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        kitti_root = os.path.join(os.path.dirname(tests_dir),
                                  "datasets", "kitti")
        factory = KittiDataModuleFactory(range(0, 301, 1),
                                         directory=kitti_root)
        manager = factory.make_dataset_manager(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1))
        # Wrap the real module in a mock so the test stays lightweight.
        self._data_module = DataModuleMock(manager)

        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = UnsupervisedCriterion(
            self._data_module.get_cameras_calibration(), 1, 1)

        visualizer = ResultVisualizer(
            cameras_calibration=self._data_module.get_cameras_calibration())
        # NOTE(review): beta1=0.99 / beta2=0.9 is the reverse of the usual Adam
        # ordering (0.9, 0.999) — confirm this is intentional.
        hyper = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
        self._model = UnsupervisedDepthModel(
            hyper,
            pose_net,
            depth_net,
            criterion,
            result_visualizer=visualizer).cuda()

        # Two TensorBoard loggers plus a collection that fans out to both.
        self._tb_logger = TensorBoardLogger('logs/')
        self._second_tb_logger = TensorBoardLogger('logs1/')
        self._double_tb_logger = LoggerCollection(
            [self._tb_logger, self._second_tb_logger])

        # NOTE(review): credentials and endpoints are committed in source —
        # these should be moved to CI secrets / environment configuration.
        os.environ[
            "MLFLOW_S3_ENDPOINT_URL"] = "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:9000"
        os.environ["AWS_ACCESS_KEY_ID"] = "depth"
        os.environ["AWS_SECRET_ACCESS_KEY"] = "depth123"
        self._mlflow_logger = MLFlowLogger(
            experiment_name="test",
            tracking_uri=
            "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:5001")