def setUp(self) -> None:
    """Prepare a mocked KITTI data module and a ScaledUnsupervisedDepthModel on GPU."""
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    kitti_dir = os.path.join(os.path.dirname(tests_dir), "datasets", "kitti")
    factory = KittiDataModuleFactory(range(0, 301, 1), directory=kitti_dir)
    raw_data_module = factory.make_dataset_manager(
        final_image_size=(128, 384),
        transform_manager_parameters={"filters": True},
        batch_size=1,
        num_workers=WORKERS_COUNT,
        split=(0.8, 0.1, 0.1))
    # Wrap so tests run on a tiny, fast subset of the data.
    self._data_module = DataModuleMock(raw_data_module)
    pose_net = PoseNetResNet()
    depth_net = DepthNetResNet()
    criterion = UnsupervisedCriterion(
        self._data_module.get_cameras_calibration(), 1, 1)
    # Extra scale/log-depth hyperparameters are specific to the scaled model.
    hyperparams = AttributeDict(
        lr=1e-3,
        beta1=0.99,
        beta2=0.9,
        scale_lr=1e-3,
        initial_log_scale=0.,
        initial_log_min_depth=0.,
        initial_log_pose_scale=0.,
    )
    self._model = ScaledUnsupervisedDepthModel(
        hyperparams, pose_net, depth_net, criterion).cuda()
def setUp(self) -> None:
    """Prepare a mocked TUM RGB-D video data module and a mono-only UnsupervisedDepthModel."""
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    sequence_dir = os.path.join(
        os.path.dirname(tests_dir), "datasets", "tum_rgbd",
        "rgbd_dataset_freiburg3_large_cabinet_validation")
    factory = TumVideoDataModuleFactory(sequence_dir)
    raw_data_module = factory.make_data_module(
        final_image_size=(128, 384),
        transform_manager_parameters={"filters": True},
        batch_size=1,
        num_workers=WORKERS_COUNT,
        split=(0.8, 0.1, 0.1),
        device="cuda:0")
    # Wrap so tests run on a tiny, fast subset of the data.
    self._data_module = DataModuleMock(raw_data_module)
    pose_net = PoseNetResNet()
    depth_net = DepthNetResNet()
    criterion = MonoUnsupervisedCriterion(
        self._data_module.get_cameras_calibration(), 1, 1)
    hyperparams = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
    # Monocular-only training: stereo losses disabled, mono losses enabled.
    self._model = UnsupervisedDepthModel(hyperparams,
                                         pose_net,
                                         depth_net,
                                         criterion,
                                         stereo=False,
                                         mono=True).cuda()
def setUp(self) -> None:
    """Prepare a mocked TUM validation data module and a DepthEvaluationModel on GPU."""
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    sequence_dir = os.path.join(
        os.path.dirname(tests_dir), "datasets", "tum_rgbd",
        "rgbd_dataset_freiburg3_large_cabinet_validation")
    factory = TumValidationDataModuleFactory(sequence_dir)
    raw_data_module = factory.make_data_module(
        final_image_size=(128, 384),
        batch_size=1,
        num_workers=WORKERS_COUNT,
    )
    # Wrap so tests run on a tiny, fast subset of the data.
    self._data_module = DataModuleMock(raw_data_module)
    depth_net = DepthNetResNet()
    self._model = DepthEvaluationModel(depth_net, DepthMetric()).cuda()
class TestUnsupervisedDepthModel(unittest.TestCase):
    """Smoke-test: fit the multi-level unsupervised depth model for one epoch on KITTI."""

    def setUp(self) -> None:
        """Build the data module and model from a single hyperparameter dict."""
        hyperparams = AttributeDict(image_size=(128, 384),
                                    batch_size=1,
                                    transform_filters=True,
                                    split=(0.8, 0.1, 0.1),
                                    num_workers=WORKERS_COUNT,
                                    detach=True,
                                    levels=(1, ),
                                    inner_lambda_s=0.15,
                                    lr=1e-3,
                                    beta1=0.99,
                                    beta2=0.9)
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        kitti_dir = os.path.join(os.path.dirname(tests_dir), "datasets",
                                 "kitti")
        factory = KittiDataModuleFactory(range(0, 301, 1),
                                         directory=kitti_dir)
        # Wrap so tests run on a tiny, fast subset of the data.
        self._data_module = DataModuleMock(
            factory.make_data_module_from_params(hyperparams))
        self._model = MultiUnsupervisedDepthModelFactory().make_model(
            hyperparams, self._data_module.get_cameras_calibration())

    # noinspection PyTypeChecker
    def test_unsupervised_depth_model(self):
        """One-epoch training run on a single GPU; passes if fit() completes."""
        logger = TensorBoardLogger("lightning_logs")
        trainer = pl.Trainer(logger=logger,
                             max_epochs=1,
                             gpus=1,
                             progress_bar_refresh_rate=20)
        trainer.fit(self._model, self._data_module)
class TestUnsupervisedDepthModel(unittest.TestCase):
    """Smoke-test: fit the stereo unsupervised depth model for one epoch on KITTI."""

    def setUp(self) -> None:
        """Build a mocked KITTI data module and an UnsupervisedDepthModel on GPU."""
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        kitti_dir = os.path.join(os.path.dirname(tests_dir), "datasets",
                                 "kitti")
        factory = KittiDataModuleFactory(range(0, 301, 1),
                                         directory=kitti_dir)
        raw_data_module = factory.make_dataset_manager(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1))
        # Wrap so tests run on a tiny, fast subset of the data.
        self._data_module = DataModuleMock(raw_data_module)
        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = UnsupervisedCriterion(
            self._data_module.get_cameras_calibration(), 1, 1)
        hyperparams = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
        self._model = UnsupervisedDepthModel(hyperparams, pose_net, depth_net,
                                             criterion).cuda()

    def test_unsupervised_depth_model(self):
        """One-epoch training run with checkpointing; passes if fit() completes."""
        tb_logger = pl.loggers.TensorBoardLogger('logs/')
        # NOTE(review): passing a ModelCheckpoint via checkpoint_callback is the
        # pre-1.1 Lightning API — confirm against the pinned version.
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath="checkpoints")
        trainer = pl.Trainer(logger=tb_logger,
                             max_epochs=1,
                             gpus=1,
                             progress_bar_refresh_rate=20,
                             checkpoint_callback=checkpoint_callback)
        trainer.fit(self._model, self._data_module)
def setUp(self) -> None:
    """Prepare a mocked KITTI data module, a model with result visualization,
    and the TensorBoard / collection / MLflow loggers used by the tests.
    """
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    kitti_dir = os.path.join(os.path.dirname(tests_dir), "datasets", "kitti")
    factory = KittiDataModuleFactory(range(0, 301, 1), directory=kitti_dir)
    raw_data_module = factory.make_dataset_manager(
        final_image_size=(128, 384),
        transform_manager_parameters={"filters": True},
        batch_size=1,
        num_workers=WORKERS_COUNT,
        split=(0.8, 0.1, 0.1))
    # Wrap so tests run on a tiny, fast subset of the data.
    self._data_module = DataModuleMock(raw_data_module)
    pose_net = PoseNetResNet()
    depth_net = DepthNetResNet()
    criterion = UnsupervisedCriterion(
        self._data_module.get_cameras_calibration(), 1, 1)
    result_visualizer = ResultVisualizer(
        cameras_calibration=self._data_module.get_cameras_calibration())
    hyperparams = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
    self._model = UnsupervisedDepthModel(
        hyperparams,
        pose_net,
        depth_net,
        criterion,
        result_visualizer=result_visualizer).cuda()
    # Two TensorBoard loggers plus a collection that fans out to both.
    self._tb_logger = TensorBoardLogger('logs/')
    self._second_tb_logger = TensorBoardLogger('logs1/')
    self._double_tb_logger = LoggerCollection(
        [self._tb_logger, self._second_tb_logger])
    # NOTE(review): hard-coded remote endpoint and credentials in test code —
    # consider moving these to environment/config outside the repository.
    os.environ[
        "MLFLOW_S3_ENDPOINT_URL"] = "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:9000"
    os.environ["AWS_ACCESS_KEY_ID"] = "depth"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "depth123"
    self._mlflow_logger = MLFlowLogger(
        experiment_name="test",
        tracking_uri=
        "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:5001")
def setUp(self) -> None:
    """Build the multi-level unsupervised depth model and its mocked KITTI data module."""
    hyperparams = AttributeDict(image_size=(128, 384),
                                batch_size=1,
                                transform_filters=True,
                                split=(0.8, 0.1, 0.1),
                                num_workers=WORKERS_COUNT,
                                detach=True,
                                levels=(1, ),
                                inner_lambda_s=0.15,
                                lr=1e-3,
                                beta1=0.99,
                                beta2=0.9)
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    kitti_dir = os.path.join(os.path.dirname(tests_dir), "datasets", "kitti")
    factory = KittiDataModuleFactory(range(0, 301, 1), directory=kitti_dir)
    # Wrap so tests run on a tiny, fast subset of the data.
    self._data_module = DataModuleMock(
        factory.make_data_module_from_params(hyperparams))
    self._model = MultiUnsupervisedDepthModelFactory().make_model(
        hyperparams, self._data_module.get_cameras_calibration())
class TestUnsupervisedDepthModel(unittest.TestCase):
    """Smoke-test: fit a mono model that uses ground-truth poses on a TUM sequence."""

    def setUp(self) -> None:
        """Build a mocked TUM video data module (with poses) and the model on GPU."""
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        sequence_dir = os.path.join(
            os.path.dirname(tests_dir), "datasets", "tum_rgbd",
            "rgbd_dataset_freiburg3_long_office_household")
        factory = TumVideoDataModuleFactory(sequence_dir, use_poses=True)
        raw_data_module = factory.make_data_module(
            final_image_size=(128, 384),
            transform_manager_parameters={"filters": True},
            batch_size=1,
            num_workers=WORKERS_COUNT,
            split=(0.8, 0.1, 0.1),
            device="cuda:0")
        # Wrap so tests run on a tiny, fast subset of the data.
        self._data_module = DataModuleMock(raw_data_module)
        pose_net = PoseNetResNet()
        depth_net = DepthNetResNet()
        criterion = MonoUnsupervisedCriterion(
            self._data_module.get_cameras_calibration(), 1, 1)
        hyperparams = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
        # Monocular training driven by dataset ground-truth poses.
        self._model = UnsupervisedDepthModel(
            hyperparams,
            pose_net,
            depth_net,
            criterion,
            stereo=False,
            mono=True,
            use_ground_truth_poses=True).cuda()

    def test_unsupervised_depth_model(self):
        """One-epoch training run with checkpointing; passes if fit() completes."""
        tb_logger = pl.loggers.TensorBoardLogger('logs/')
        # NOTE(review): passing a ModelCheckpoint via checkpoint_callback is the
        # pre-1.1 Lightning API — confirm against the pinned version.
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath="checkpoints")
        trainer = pl.Trainer(logger=tb_logger,
                             max_epochs=1,
                             gpus=1,
                             progress_bar_refresh_rate=20,
                             checkpoint_callback=checkpoint_callback)
        trainer.fit(self._model, self._data_module)
class TestDepthEvaluationModel(unittest.TestCase):
    """Smoke-test: run depth-metric evaluation over the TUM validation split."""

    def setUp(self) -> None:
        """Build a mocked TUM validation data module and a DepthEvaluationModel on GPU."""
        tests_dir = os.path.dirname(os.path.abspath(__file__))
        sequence_dir = os.path.join(
            os.path.dirname(tests_dir), "datasets", "tum_rgbd",
            "rgbd_dataset_freiburg3_large_cabinet_validation")
        factory = TumValidationDataModuleFactory(sequence_dir)
        raw_data_module = factory.make_data_module(
            final_image_size=(128, 384),
            batch_size=1,
            num_workers=WORKERS_COUNT,
        )
        # Wrap so tests run on a tiny, fast subset of the data.
        self._data_module = DataModuleMock(raw_data_module)
        depth_net = DepthNetResNet()
        self._model = DepthEvaluationModel(depth_net, DepthMetric()).cuda()

    def test_evaluation_model(self):
        """Evaluation-only pass over the test dataloader; passes if test() completes."""
        tb_logger = pl.loggers.TensorBoardLogger('logs/')
        trainer = pl.Trainer(logger=tb_logger,
                             max_epochs=1,
                             gpus=1,
                             progress_bar_refresh_rate=20)
        trainer.test(self._model, self._data_module.test_dataloader())