예제 #1
0
    def set_dataset(self):
        """Build the validation dataloader and the train-index mapping.

        Reads the test split file, constructs a one-sample-per-batch KITTI
        validation loader, and loads the training mapping used by
        ``get_indmapping`` to translate split indices.
        """
        # Split files live two directories above this module.
        test_fpath = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                  "splits", self.opt.split, "test_files.txt")
        self.val_filenames = readlines(test_fpath)

        val_dataset = KittiDataset(
            self.opt.data_path,
            self.opt.val_gt_path,
            self.val_filenames,
            self.opt.height,
            self.opt.width,
            crph=self.opt.crph,
            crpw=self.opt.crpw,
            is_train=False,
            predang_path=self.opt.predang_path,
            semanticspred_path=self.opt.semanticspred_path,
            threeinput=self.opt.threeinput)

        # With batch_size=1, drop_last can never actually discard a sample.
        self.val_loader = DataLoader(val_dataset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=self.opt.num_workers,
                                     pin_memory=True,
                                     drop_last=True)

        # Idiom fix: prefer len() over calling the dunder directly.
        self.val_num = len(val_dataset)

        mapping = readlines(
            os.path.join(self.opt.trainmapping_fold, 'training_mapping.txt'))
        self.indmapping = self.get_indmapping(mapping)
예제 #2
0
    def set_dataset(self):
        """Build the validation dataloader.

        NOTE(review): this variant reads the *train* split file and uses
        ``shuffle=True`` / ``drop_last=True`` for its validation loader,
        which is unusual for evaluation — presumably intentional (e.g.
        sampling training images), but worth confirming against callers.
        """
        fpath = os.path.join(os.getcwd(), "splits", self.opt.split,
                             "{}_files.txt")
        val_filenames = readlines(fpath.format("train"))

        val_dataset = KittiDataset(
            self.opt.data_path,
            self.opt.gt_path,
            val_filenames,
            self.opt.height,
            self.opt.width,
            crph=self.opt.crph,
            crpw=self.opt.crpw,
            is_train=False,
            semanticspred_path=self.opt.semanticspred_path)

        # num_workers=0 keeps all data loading in the main process.
        self.val_loader = DataLoader(val_dataset,
                                     self.opt.batch_size,
                                     shuffle=True,
                                     num_workers=0,
                                     pin_memory=True,
                                     drop_last=True)

        # Idiom fix: prefer len() over calling the dunder directly.
        self.val_num = len(val_dataset)
예제 #3
0
    def set_dataset(self):
        """Build one validation dataloader per ground-truth flavour.

        Populates ``self.valdatasets`` with loaders keyed 'rawLidar',
        'filterLidar' and 'semidense', all reading the same test split.
        """
        self.valdatasets = dict()
        test_fpath = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                  "splits", self.opt.split, "test_files.txt")
        val_filenames = readlines(test_fpath)

        # Ground-truth source per validation flavour, keyed by loader name.
        gt_sources = [('rawLidar', self.opt.gt_path_rawLidar),
                      ('filterLidar', self.opt.gt_path_filterLidar),
                      ('semidense', self.opt.gt_path_semidense)]

        for name, gt_path in gt_sources:
            dataset = KittiDataset(self.opt.data_path,
                                   gt_path,
                                   val_filenames,
                                   self.opt.height,
                                   self.opt.width,
                                   crph=self.opt.crph,
                                   crpw=self.opt.crpw,
                                   is_train=False)
            self.valdatasets[name] = DataLoader(
                dataset,
                self.opt.batch_size,
                shuffle=False,
                num_workers=self.opt.num_workers,
                pin_memory=True,
                drop_last=True)
예제 #4
0
    def set_dataset(self):
        """Create the training and validation dataloaders for the KITTI split."""
        split_root = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                  "splits", self.opt.split)
        train_filenames = readlines(
            os.path.join(split_root, "{}_files.txt").format("train"))
        val_filenames = readlines(os.path.join(split_root, "test_files.txt"))

        # Constructor options common to both datasets.
        shared_opts = dict(crph=self.opt.crph,
                           crpw=self.opt.crpw,
                           predang_path=self.opt.predang_path,
                           semanticspred_path=self.opt.semanticspred_path)

        train_dataset = KittiDataset(self.opt.data_path,
                                     self.opt.gt_path,
                                     train_filenames,
                                     self.opt.height,
                                     self.opt.width,
                                     is_train=True,
                                     **shared_opts)
        val_dataset = KittiDataset(self.opt.data_path,
                                   self.opt.val_gt_path,
                                   val_filenames,
                                   self.opt.height,
                                   self.opt.width,
                                   is_train=False,
                                   **shared_opts)

        loader_opts = dict(num_workers=self.opt.num_workers,
                           pin_memory=True,
                           drop_last=True)
        self.train_loader = DataLoader(train_dataset,
                                       self.opt.batch_size,
                                       shuffle=True,
                                       **loader_opts)
        # NOTE: the validation loader is also shuffled here, as in the
        # original — presumably deliberate; confirm before changing.
        self.val_loader = DataLoader(val_dataset,
                                     self.opt.batch_size,
                                     shuffle=True,
                                     **loader_opts)
예제 #5
0
    def set_dataset(self):
        """Build the validation dataloader over the test split."""
        fpath = os.path.join(os.getcwd(), "splits", self.opt.split,
                             "{}_files.txt")
        val_filenames = readlines(fpath.format("test"))

        val_dataset = KittiDataset(
            self.opt.data_path, self.opt.gt_path, val_filenames,
            self.opt.height, self.opt.width,
            crph=self.opt.crph, crpw=self.opt.crpw, is_train=False,
            instancepred_path=self.opt.instancepred_path)

        # drop_last=False: evaluate every sample, including a short final batch.
        self.val_loader = DataLoader(
            val_dataset, self.opt.batch_size, shuffle=False,
            num_workers=0, pin_memory=True, drop_last=False)

        # Idiom fix: prefer len() over calling the dunder directly.
        self.val_num = len(val_dataset)
예제 #6
0
    def set_dataset(self):
        """Build train/val dataloaders and derive step bookkeeping.

        Sets ``self.train_loader`` / ``self.val_loader`` plus
        ``train_num``, ``val_num`` and ``num_total_steps``.
        """
        fpath = os.path.join(os.getcwd(), "splits", self.opt.split,
                             "{}_files.txt")
        test_fpath = os.path.join(os.getcwd(), "splits", self.opt.split,
                                  "test_files.txt")
        train_filenames = readlines(fpath.format("train"))
        val_filenames = readlines(test_fpath)

        train_dataset = KittiDataset(self.opt.data_path,
                                     self.opt.gt_path,
                                     train_filenames,
                                     self.opt.height,
                                     self.opt.width,
                                     crph=self.opt.crph,
                                     crpw=self.opt.crpw,
                                     is_train=True)

        val_dataset = KittiDataset(self.opt.data_path,
                                   self.opt.val_gt_path,
                                   val_filenames,
                                   self.opt.height,
                                   self.opt.width,
                                   crph=self.opt.crph,
                                   crpw=self.opt.crpw,
                                   is_train=False)

        # Training shuffle can be disabled via the ban_shuffle option.
        self.train_loader = DataLoader(train_dataset,
                                       self.opt.batch_size,
                                       shuffle=not self.opt.ban_shuffle,
                                       num_workers=self.opt.num_workers,
                                       pin_memory=True,
                                       drop_last=True)
        self.val_loader = DataLoader(val_dataset,
                                     self.opt.batch_size,
                                     shuffle=False,
                                     num_workers=self.opt.num_workers,
                                     pin_memory=True,
                                     drop_last=True)

        # Idiom fix: prefer len() over calling the dunder directly.
        self.train_num = len(train_dataset)
        self.val_num = len(val_dataset)
        # Total optimizer steps; floor division matches drop_last=True above.
        self.num_total_steps = self.train_num // self.opt.batch_size * self.opt.num_epochs