Example #1
File: Dataset.py Project: WayneZXY/SA-P2B
    def __init__(self,
                 input_size,
                 path,
                 split="",
                 category_name="Car",
                 regress="GAUSSIAN",
                 sigma_Gaussian=1,
                 offset_BB=0,
                 scale_BB=1.0):
        # preload the training-set point clouds and the template point clouds
        super(SiameseTrain,
              self).__init__(input_size=input_size,
                             path=path,
                             split=split,
                             category_name=category_name,
                             regress=regress,
                             offset_BB=offset_BB,
                             scale_BB=scale_BB)  # attributes inherited from the SiameseDataset parent class

        self.sigma_Gaussian = sigma_Gaussian
        self.offset_BB = offset_BB
        self.scale_BB = scale_BB
        self.saved_train_BBs_dir = '/mnt/ssd-data/RUNNING/data/train_data.list_of_BBs.npy'
        self.saved_train_PCs_dir = '/mnt/ssd-data/RUNNING/data/train_data.list_of_PCs.npy'
        self.saved_valid_BBs_dir = '/mnt/ssd-data/RUNNING/data/valid_data.list_of_BBs.npy'
        self.saved_valid_PCs_dir = '/mnt/ssd-data/RUNNING/data/valid_data.list_of_PCs.npy'
        # self.saved_train_BBs_dir = "E:\RUNNING\data\\train_data.list_of_BBs.npy"
        # self.saved_train_PCs_dir = "E:\RUNNING\data\\train_data.list_of_PCs.npy"
        # self.saved_valid_BBs_dir = "E:\RUNNING\data\\valid_data.list_of_BBs.npy"
        # self.saved_valid_PCs_dir = "E:\RUNNING\data\\valid_data.list_of_PCs.npy"
        # self.saved_model_PCs_dir = '/media/zhouxiaoyu/本地磁盘/RUNNING/P2B/train_data.model_PC.npy'
        # self.saved_annos_dir = '/media/zhouxiaoyu/本地磁盘/RUNNING/P2B/train_data.list_of_anno.npy'
        self.num_candidates_perframe = 4  # number of candidate samples drawn per frame

        logging.info("preloading PC...")
        self.list_of_PCs = [None] * len(self.list_of_anno)
        # self.list_of_anno is inherited from SiameseDataset
        self.list_of_BBs = [None] * len(self.list_of_anno)
        # np.load never returns None: it raises FileNotFoundError when the
        # cache is missing, so check for the file on disk instead (assumes
        # os is imported at module level)
        if os.path.exists(self.saved_train_PCs_dir):
            if self.split == 'Train':
                self.list_of_PCs = np.load(self.saved_train_PCs_dir,
                                           allow_pickle=True).tolist()
                self.list_of_BBs = np.load(self.saved_train_BBs_dir,
                                           allow_pickle=True).tolist()
            else:
                self.list_of_PCs = np.load(self.saved_valid_PCs_dir,
                                           allow_pickle=True).tolist()
                self.list_of_BBs = np.load(self.saved_valid_BBs_dir,
                                           allow_pickle=True).tolist()
        else:
            for index in tqdm(range(len(self.list_of_anno)),
                              desc='all annotations'):
                anno = self.list_of_anno[index]  # fetch the annotation
                PC, box = self.getBBandPC(anno)  # point cloud and bounding box for this annotation
                # the point cloud is 4-D and the bounding box 17-D (see dataset_class.Box.__repr__)
                new_PC = utils.cropPC(PC, box, offset=10)  # crop the cloud around the scaled, offset box

                self.list_of_PCs[index] = new_PC
                self.list_of_BBs[index] = box
        logging.info("PC preloaded!")

        logging.info("preloading Model..")
        self.model_PC = [None] * len(self.list_of_tracklet_anno)
        # len_model_PC = [None] * len(self.list_of_tracklet_anno)
        # empty list for the template point clouds, one per tracked instance;
        # its length equals the number of instances of this category across
        # all sequences in the dataset
        for i in tqdm(range(len(self.list_of_tracklet_anno)),
                      desc='annotations of a certain instance'):
            list_of_anno = self.list_of_tracklet_anno[i]
            # all annotations of one tracked instance
            PCs = []
            BBs = []
            cnt = 0
            for anno in list_of_anno:
                this_PC, this_BB = self.getBBandPC(anno)
                PCs.append(this_PC)
                BBs.append(this_BB)
                anno["model_idx"] = i
                # add a model_idx field to the anno pd.Series
                # recording which instance this annotation belongs to
                anno["relative_idx"] = cnt
                # add a relative_idx field to the anno pd.Series
                # recording the frame position within this instance
                cnt += 1

            self.model_PC[i] = getModel(PCs,
                                        BBs,
                                        offset=self.offset_BB,
                                        scale=self.scale_BB)
            # len_model_PC[i] = len(self.model_PC[i])
            # aggregate all frames and ground-truth boxes of this instance
            # into its template point cloud
        logging.info("Model preloaded!")
Example #2
File: Dataset.py Project: WayneZXY/SA-P2B
    def getitem(self, index):  # the de facto __getitem__ implementation
        # given an index, return the sampled point cloud, its labels,
        # box-regression targets, and the ground-truth template point cloud
        anno_idx = self.getAnnotationIndex(index)
        sample_idx = self.getSearchSpaceIndex(index)

        if sample_idx == 0:
            sample_offsets = np.zeros(3)
        else:
            gaussian = KalmanFiltering(bnd=[1, 1, 5])
            # instantiating KalmanFiltering calls reset() with bnd,
            # initializing the mean, covariance, data, and score weights
            sample_offsets = gaussian.sample(1)[0]
            # a random offset drawn from a multivariate normal distribution

        this_anno = self.list_of_anno[anno_idx]
        this_PC, this_BB = self.getPCandBBfromIndex(anno_idx)
        sample_BB = utils.getOffsetBB(this_BB, sample_offsets)
        # randomly offset the annotated box to obtain the sample bounding box

        # sample_PC = utils.cropAndCenterPC(
        #     this_PC, sample_BB, offset=self.offset_BB, scale=self.scale_BB)
        sample_PC, sample_label, sample_reg = utils.cropAndCenterPC_label(
            this_PC,
            sample_BB,
            this_BB,
            sample_offsets,
            offset=self.offset_BB,
            scale=self.scale_BB)
        # sample_PC: the sampled point cloud
        # sample_label: per-point positive/negative labels
        # sample_reg: bounding-box regression targets
        if sample_PC.nbr_points() <= 20:
            return self.getitem(np.random.randint(0, self.__len__()))
        # if too few points were sampled, retry with a random index
        # sample_PC = utils.regularizePC(sample_PC, self.input_size)[0]
        sample_PC, sample_label, sample_reg, sample_seg_label, sample_seg_offset = \
            utils.regularizePCwithlabel(sample_PC, sample_label, sample_reg, self.input_size)

        if this_anno["relative_idx"] == 0:  # 如果这是这个实例对象的第一个标注
            prev_idx = 0  # 前一个索引值
            fir_idx = 0  # 第一个索引值
        else:
            prev_idx = anno_idx - 1  # 前一个索引值
            fir_idx = anno_idx - this_anno["relative_idx"]
            # 第一个索引值 = 当前索引值 - 在本实例中的相对索引值
        gt_PC_pre, gt_BB_pre = self.getPCandBBfromIndex(prev_idx)
        gt_PC_fir, gt_BB_fir = self.getPCandBBfromIndex(fir_idx)

        if sample_idx == 0:
            samplegt_offsets = np.zeros(3)
        else:
            samplegt_offsets = np.random.uniform(low=-0.3, high=0.3, size=3)
            samplegt_offsets[2] = samplegt_offsets[2] * 5.0
        gt_BB_pre = utils.getOffsetBB(gt_BB_pre, samplegt_offsets)

        gt_PC = getModel([gt_PC_fir, gt_PC_pre], [gt_BB_fir, gt_BB_pre],
                         offset=self.offset_BB,
                         scale=self.scale_BB)

        if gt_PC.nbr_points() <= 20:
            return self.getitem(np.random.randint(0, self.__len__()))
        gt_PC = utils.regularizePC(gt_PC, self.input_size)
        # gt_PC = np.array(gt_PC.points, dtype=np.float32)
        # gt_PC = torch.from_numpy(gt_PC).float()

        return sample_PC, sample_label, sample_reg, gt_PC, sample_seg_label, sample_seg_offset
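
getitem builds each training example from two indices derived from the flat dataset index: which annotated frame to use (getAnnotationIndex) and which of the num_candidates_perframe random offsets to apply (getSearchSpaceIndex, where candidate 0 means zero offset). Those helpers are not shown in this snippet; a sketch of how they are presumably defined elsewhere in this class or its parent, consistent with num_candidates_perframe = 4 above:

    def getAnnotationIndex(self, index):
        # which annotated frame this flat index maps to
        return int(index // self.num_candidates_perframe)

    def getSearchSpaceIndex(self, index):
        # which candidate offset for that frame (0 means no offset)
        return int(index % self.num_candidates_perframe)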
Example #3
File: test_tracking.py Project: nmll/P2B
def test(loader,
         model,
         epoch=-1,
         shape_aggregation="",
         reference_BB="",
         model_fusion="pointcloud",
         max_iter=-1,
         IoU_Space=3):

    batch_time = AverageMeter()
    data_time = AverageMeter()

    Success_main = Success()
    Precision_main = Precision()
    Success_batch = Success()
    Precision_batch = Precision()

    # switch to evaluate mode
    model.eval()
    end = time.time()

    dataset = loader.dataset
    batch_num = 0

    with tqdm(total=len(loader.dataset.list_of_anno)) as t:
        for batch in loader:
            batch_num += 1
            # measure data loading time
            data_time.update((time.time() - end))
            for PCs, BBs, list_of_anno in batch: # tracklet
                results_BBs = []

                for i, _ in enumerate(PCs):
                    this_anno = list_of_anno[i]
                    this_BB = BBs[i]
                    this_PC = PCs[i]
                    gt_boxs = []
                    result_boxs = []

                    # INITIAL FRAME
                    if i == 0:
                        box = BBs[i]
                        results_BBs.append(box)
                        model_PC = utils.getModel([this_PC], [this_BB], offset=dataset.offset_BB, scale=dataset.scale_BB)

                    else:
                        previous_BB = BBs[i - 1]

                        # DEFINE REFERENCE BB
                        if ("previous_result".upper() in reference_BB.upper()):
                            ref_BB = results_BBs[-1]
                        elif ("previous_gt".upper() in reference_BB.upper()):
                            ref_BB = previous_BB
                            # ref_BB = utils.getOffsetBB(this_BB,np.array([-1,1,1]))
                        elif ("current_gt".upper() in reference_BB.upper()):
                            ref_BB = this_BB

                        candidate_PC, candidate_label, candidate_reg, new_ref_box, new_this_box = utils.cropAndCenterPC_label_test(
                                        this_PC,
                                        ref_BB, this_BB,
                                        offset=dataset.offset_BB,
                                        scale=dataset.scale_BB)
                        
                        candidate_PCs, candidate_labels, candidate_reg = utils.regularizePCwithlabel(
                            candidate_PC, candidate_label, candidate_reg, dataset.input_size, istrain=False)
                        
                        candidate_PCs_torch = candidate_PCs.unsqueeze(0).cuda()

                        # AGGREGATION: IO vs ONLY0 vs ONLYI vs ALL
                        if ("firstandprevious".upper() in shape_aggregation.upper()):
                            model_PC = utils.getModel([PCs[0], PCs[i-1]], [results_BBs[0], results_BBs[i-1]], offset=dataset.offset_BB, scale=dataset.scale_BB)
                        elif ("first".upper() in shape_aggregation.upper()):
                            model_PC = utils.getModel([PCs[0]], [results_BBs[0]], offset=dataset.offset_BB, scale=dataset.scale_BB)
                        elif ("previous".upper() in shape_aggregation.upper()):
                            model_PC = utils.getModel([PCs[i-1]], [results_BBs[i-1]], offset=dataset.offset_BB, scale=dataset.scale_BB)
                        elif ("all".upper() in shape_aggregation.upper()):
                            model_PC = utils.getModel(PCs[:i], results_BBs, offset=dataset.offset_BB, scale=dataset.scale_BB)
                        else:
                            model_PC = utils.getModel(PCs[:i], results_BBs, offset=dataset.offset_BB, scale=dataset.scale_BB)

                        # torch.autograd.Variable is deprecated; plain CUDA
                        # tensors behave identically here (candidate_PCs_torch
                        # is already on the GPU from the unsqueeze above)
                        model_PC_torch = utils.regularizePC(model_PC, dataset.input_size, istrain=False).unsqueeze(0).cuda()

                        estimation_cla, estimation_reg, estimation_box, center_xyz = model(model_PC_torch, candidate_PCs_torch)
                        estimation_boxs_cpu = estimation_box.squeeze(0).detach().cpu().numpy()
                        box_idx = estimation_boxs_cpu[:, 4].argmax()
                        estimation_box_cpu = estimation_boxs_cpu[box_idx, 0:4]

                        box = utils.getOffsetBB(ref_BB, estimation_box_cpu)
                        results_BBs.append(box)

                    # estimate overlap/accuracy for the current sample
                    this_overlap = estimateOverlap(BBs[i], results_BBs[-1], dim=IoU_Space)
                    this_accuracy = estimateAccuracy(BBs[i], results_BBs[-1], dim=IoU_Space)

                    Success_main.add_overlap(this_overlap)
                    Precision_main.add_accuracy(this_accuracy)
                    Success_batch.add_overlap(this_overlap)
                    Precision_batch.add_accuracy(this_accuracy)

                    # measure elapsed time
                    batch_time.update(time.time() - end)
                    end = time.time()

                    t.update(1)

                    if Success_main.count >= max_iter and max_iter >= 0:
                        return Success_main.average, Precision_main.average


                t.set_description('Test {}: '.format(epoch)+
                                  'Time {:.3f}s '.format(batch_time.avg)+
                                  '(it:{:.3f}s) '.format(batch_time.val)+
                                  'Data:{:.3f}s '.format(data_time.avg)+
                                  '(it:{:.3f}s), '.format(data_time.val)+
                                  'Succ/Prec:'+
                                  '{:.1f}/'.format(Success_main.average)+
                                  '{:.1f}'.format(Precision_main.average))
                logging.info('batch {} '.format(batch_num)+'Succ/Prec:'+
                                  '{:.1f}/'.format(Success_batch.average)+
                                  '{:.1f}'.format(Precision_batch.average))
                Success_batch.reset()
                Precision_batch.reset()

    return Success_main.average, Precision_main.average
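
A hedged driver for test(); the dataset, loader, and checkpoint path below are assumptions, since the snippet only shows the evaluation loop. Because the loop unpacks (PCs, BBs, list_of_anno) tracklets from each batch, the collate function must keep batches as plain lists rather than stacking tensors:

import torch
from torch.utils.data import DataLoader

# Hypothetical setup: test_dataset yields (PCs, BBs, list_of_anno) tracklets
test_loader = DataLoader(test_dataset,
                         batch_size=1,
                         shuffle=False,
                         collate_fn=lambda x: x)  # keep tracklets as a list
model = torch.load('p2b_checkpoint.pth').cuda()  # placeholder checkpoint
with torch.no_grad():
    Succ, Prec = test(test_loader,
                      model,
                      shape_aggregation='firstandprevious',
                      reference_BB='previous_result')
print('Success/Precision: {:.1f}/{:.1f}'.format(Succ, Prec))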