Example #1
    def parse(self, args=None, namespace=None):
        if not self.initialized:
            self.initialize()
        self.params = self.parser.parse_args(args, namespace=namespace)

        # Preset overrides for the KITTI configuration: KITTI-trained flow
        # checkpoint, monodepth2 depth model, and a 0.5 overlap ratio.
        if self.params.configure == "kitti":
            self.params.flow_checkpoint = "FlowNet2-KITTI"
            self.params.model_type = "monodepth2"
            self.params.overlap_ratio = 0.5
            # Only override the matcher if this parser defines that option.
            if 'matcher' in self.params:
                self.params.matcher = 'sequential'

        # Resolve unspecified parameters
        model = get_depth_model(self.params.model_type)

        if self.params.align <= 0:
            self.params.align = model.align

        if self.params.learning_rate <= 0:
            self.params.learning_rate = model.learning_rate

        if self.params.lambda_view_baseline < 0:
            self.params.lambda_view_baseline = model.lambda_view_baseline

        self.print_p()

        return self.params
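
The method above falls back to the chosen depth model's defaults for any hyper-parameter left at its sentinel value (non-positive, or negative for lambda_view_baseline). Below is a minimal, self-contained sketch of that pattern; FakeDepthModel and this local get_depth_model() are hypothetical stand-ins for the project's model registry, which returns a model class exposing these attributes.

import argparse

class FakeDepthModel:
    # Hypothetical defaults standing in for the attributes a real model class exposes.
    align = 16
    learning_rate = 4e-4
    lambda_view_baseline = 1.0

def get_depth_model(model_type):
    # Stand-in registry: always returns the fake model class.
    return FakeDepthModel

parser = argparse.ArgumentParser()
parser.add_argument("--model_type", default="monodepth2")
parser.add_argument("--align", type=int, default=0)
parser.add_argument("--learning_rate", type=float, default=0.0)
parser.add_argument("--lambda_view_baseline", type=float, default=-1.0)
params = parser.parse_args([])

# Sentinel values mean "not specified": fall back to the model's defaults.
model = get_depth_model(params.model_type)
if params.align <= 0:
    params.align = model.align
if params.learning_rate <= 0:
    params.learning_rate = model.learning_rate
if params.lambda_view_baseline < 0:
    params.lambda_view_baseline = model.lambda_view_baseline

print(params)  # all three parameters now carry the model defaults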

Example #2

    def __init__(self, range_dir, frames, params):
        self.frames = frames
        self.params = params
        self.base_dir = params.path
        self.range_dir = range_dir
        self.out_dir = pjoin(self.range_dir, make_tag(params))
        os.makedirs(self.out_dir, exist_ok=True)
        print(f"Fine-tuning directory: '{self.out_dir}'")
        # Path to the checkpoints directory
        self.checkpoints_dir = pjoin(self.out_dir, "checkpoints")
        os.makedirs(self.checkpoints_dir, exist_ok=True)
        # Look up the depth model class and instantiate it
        model = get_depth_model(params.model_type)
        self.model = model()
        # Query the number of available GPUs
        num_gpus = torch.cuda.device_count()
        print(f"Using {num_gpus} GPUs.")
        if num_gpus > 1:
            self.params.batch_size *= num_gpus
            print(f"Adjusting batch size to {self.params.batch_size}.")

        self.reference_disparity = {}
        self.vis_depth_scale = None
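
This constructor prepares the output and checkpoint directories and scales the batch size with the number of visible GPUs. The sketch below reproduces just those two steps; range_dir, the tag string (produced by make_tag(params) in the real code), and the base batch size of 4 are made-up values for illustration.

import os
from os.path import join as pjoin

import torch

range_dir = "results/frame_range_0-100"   # hypothetical fine-tuning range directory
tag = "B4_LR0.0004"                        # hypothetical output of make_tag(params)

out_dir = pjoin(range_dir, tag)
checkpoints_dir = pjoin(out_dir, "checkpoints")
os.makedirs(checkpoints_dir, exist_ok=True)  # also creates out_dir

# Scale the batch size with the number of visible GPUs, as in the constructor above.
batch_size = 4
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
    batch_size *= num_gpus
print(f"Fine-tuning directory: '{out_dir}', using {num_gpus} GPUs, batch size {batch_size}.")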