def setUp(self) -> None:
    """Build a mocked KITTI data module and a CUDA-resident scaled
    unsupervised depth model for the test case.
    """
    # Dataset lives in <repo>/datasets/kitti, one level above this test file.
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    kitti_dir = os.path.join(os.path.dirname(tests_dir), "datasets", "kitti")
    factory = KittiDataModuleFactory(range(0, 301, 1), directory=kitti_dir)
    self._data_module = factory.make_dataset_manager(
        final_image_size=(128, 384),
        transform_manager_parameters={"filters": True},
        batch_size=1,
        num_workers=WORKERS_COUNT,
        split=(0.8, 0.1, 0.1),
    )
    # Wrap in a mock so tests run on a reduced/controlled dataset.
    self._data_module = DataModuleMock(self._data_module)
    # Keep construction order: nets first, then criterion, then model.
    pose_net = PoseNetResNet()
    depth_net = DepthNetResNet()
    criterion = UnsupervisedCriterion(
        self._data_module.get_cameras_calibration(), 1, 1)
    hyper_params = AttributeDict(
        lr=1e-3,
        beta1=0.99,
        beta2=0.9,
        scale_lr=1e-3,
        initial_log_scale=0.,
        initial_log_min_depth=0.,
        initial_log_pose_scale=0.,
    )
    self._model = ScaledUnsupervisedDepthModel(
        hyper_params, pose_net, depth_net, criterion).cuda()
def setUp(self) -> None:
    """Create a KITTI dataset manager over frames 0..300 and record the
    expected (train, validation, test) split lengths.
    """
    # Dataset lives in <repo>/datasets/kitti, one level above this test file.
    here = os.path.dirname(os.path.abspath(__file__))
    kitti_folder = os.path.join(os.path.dirname(here), "datasets", "kitti")
    module_factory = KittiDataModuleFactory(range(0, 301, 1),
                                            directory=kitti_folder)
    self._data_module = module_factory.make_dataset_manager(
        final_image_size=(128, 384),
        transform_manager_parameters={"filters": True},
        batch_size=20,
        num_workers=WORKERS_COUNT,
        split=(0.8, 0.1, 0.1),
    )
    # Expected split sizes for the 0.8/0.1/0.1 split above.
    self._lengths = (240, 30, 30)
def setUp(self) -> None:
    """Build a mocked KITTI data module, an unsupervised depth model with a
    result visualizer, and TensorBoard / MLFlow loggers for the test run.
    """
    # Dataset lives in <repo>/datasets/kitti, one level above this test file.
    current_folder = os.path.dirname(os.path.abspath(__file__))
    dataset_folder = os.path.join(os.path.dirname(current_folder),
                                  "datasets", "kitti")
    data_module_factory = KittiDataModuleFactory(range(0, 301, 1),
                                                 directory=dataset_folder)
    self._data_module = data_module_factory.make_dataset_manager(
        final_image_size=(128, 384),
        transform_manager_parameters={"filters": True},
        batch_size=1,
        num_workers=WORKERS_COUNT,
        split=(0.8, 0.1, 0.1))
    # Wrap in a mock so tests run on a reduced/controlled dataset.
    self._data_module = DataModuleMock(self._data_module)
    pose_net = PoseNetResNet()
    depth_net = DepthNetResNet()
    criterion = UnsupervisedCriterion(
        self._data_module.get_cameras_calibration(), 1, 1)
    result_visualizer = ResultVisualizer(
        cameras_calibration=self._data_module.get_cameras_calibration())
    params = AttributeDict(lr=1e-3, beta1=0.99, beta2=0.9)
    self._model = UnsupervisedDepthModel(
        params, pose_net, depth_net, criterion,
        result_visualizer=result_visualizer).cuda()
    # Two TensorBoard loggers combined into one collection to exercise
    # multi-logger support.
    self._tb_logger = TensorBoardLogger('logs/')
    self._second_tb_logger = TensorBoardLogger('logs1/')
    self._double_tb_logger = LoggerCollection(
        [self._tb_logger, self._second_tb_logger])
    # SECURITY NOTE(review): hard-coded MLflow S3 endpoint and AWS credentials
    # are committed to source here. These should come from CI secrets or the
    # environment, not literals — flagging, not changing, to preserve behavior.
    os.environ[
        "MLFLOW_S3_ENDPOINT_URL"] = "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:9000"
    os.environ["AWS_ACCESS_KEY_ID"] = "depth"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "depth123"
    self._mlflow_logger = MLFlowLogger(
        experiment_name="test",
        tracking_uri=
        "http://ec2-3-134-104-174.us-east-2.compute.amazonaws.com:5001")
def setUp(self) -> None:
    """Build a mocked KITTI data module and a multi-level unsupervised depth
    model, both driven by a single shared hyper-parameter set.
    """
    hyper_params = AttributeDict(
        image_size=(128, 384),
        batch_size=1,
        transform_filters=True,
        split=(0.8, 0.1, 0.1),
        num_workers=WORKERS_COUNT,
        detach=True,
        levels=(1, ),
        inner_lambda_s=0.15,
        lr=1e-3,
        beta1=0.99,
        beta2=0.9,
    )
    # Dataset lives in <repo>/datasets/kitti, one level above this test file.
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    kitti_root = os.path.join(os.path.dirname(tests_dir), "datasets", "kitti")
    factory = KittiDataModuleFactory(range(0, 301, 1), directory=kitti_root)
    # Wrap in a mock so tests run on a reduced/controlled dataset.
    self._data_module = DataModuleMock(
        factory.make_data_module_from_params(hyper_params))
    self._model = MultiUnsupervisedDepthModelFactory().make_model(
        hyper_params, self._data_module.get_cameras_calibration())
TensorBoardLogger("lightning_logs"),
    MLFlowLogger(experiment_name=arguments.experiment_name,
                 tracking_uri=mlflow_url)
])
# Build the trainer from the parsed CLI arguments, attaching both loggers.
trainer = pl.Trainer.from_argparse_args(arguments, logger=logger)
# Optional frame selection: a CLI string like "start,stop[,step]" becomes a
# range; None lets the factory use its default frame set.
if arguments.frames is not None:
    frames = arguments.frames.split(",")
    frames = [int(x) for x in frames]
    frames = range(*frames)
else:
    frames = None
data_model_factory = KittiDataModuleFactory(frames, arguments.sequences,
                                            arguments.dataset)
# Load hyper-parameters from the YAML config given on the command line.
params = load_hparams_from_yaml(arguments.config)
params = AttributeDict(params)
print("Load model from params \n" + str(params))
data_model = data_model_factory.make_data_module_from_params(params)
model = MultiUnsupervisedDepthModelFactory().make_model(
    params, data_model.get_cameras_calibration())
# Optionally restore weights from a checkpoint before training.
if arguments.load_model:
    print("Load checkpoint")
    load_undeepvo_checkpoint(model, arguments.model_checkpoint)
print("Start training")
trainer.fit(model, data_model)