import time
import logging

import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from tqdm import tqdm

# Repo-local helpers; the exact module paths below are assumptions inferred
# from how these names are used in this file.
import utils
from metrics import (AverageMeter, Success, Precision, Accuracy_Completeness,
                     estimateOverlap, estimateAccuracy)
from searchspace import (KalmanFiltering, ParticleFiltering,
                         GaussianMixtureModel, ExhaustiveSearch)


def test(loader,
         model,
         epoch=-1,
         shape_aggregation="",
         reference_BB="",
         model_fusion="pointcloud",
         max_iter=-1,
         IoU_Space=3):
    batch_time = AverageMeter()
    data_time = AverageMeter()

    Success_main = Success()
    Precision_main = Precision()
    Success_batch = Success()
    Precision_batch = Precision()

    # switch to evaluate mode
    model.eval()
    end = time.time()

    dataset = loader.dataset
    batch_num = 0
    with tqdm(enumerate(loader), total=len(loader.dataset.list_of_anno)) as t:
        for batch in loader:
            batch_num += 1
            # measure data loading time
            data_time.update(time.time() - end)
            for PCs, BBs, list_of_anno in batch:  # one tracklet
                results_BBs = []
                for i, _ in enumerate(PCs):
                    this_anno = list_of_anno[i]
                    this_BB = BBs[i]
                    this_PC = PCs[i]
                    gt_boxs = []
                    result_boxs = []

                    # INITIAL FRAME
                    if i == 0:
                        box = BBs[i]
                        results_BBs.append(box)
                        model_PC = utils.getModel([this_PC], [this_BB],
                                                  offset=dataset.offset_BB,
                                                  scale=dataset.scale_BB)
                    else:
                        previous_BB = BBs[i - 1]

                        # DEFINE REFERENCE BB
                        if "previous_result".upper() in reference_BB.upper():
                            ref_BB = results_BBs[-1]
                        elif "previous_gt".upper() in reference_BB.upper():
                            ref_BB = previous_BB
                            # ref_BB = utils.getOffsetBB(this_BB, np.array([-1, 1, 1]))
                        elif "current_gt".upper() in reference_BB.upper():
                            ref_BB = this_BB

                        candidate_PC, candidate_label, candidate_reg, new_ref_box, new_this_box = utils.cropAndCenterPC_label_test(
                            this_PC,
                            ref_BB,
                            this_BB,
                            offset=dataset.offset_BB,
                            scale=dataset.scale_BB)

                        candidate_PCs, candidate_labels, candidate_reg = utils.regularizePCwithlabel(
                            candidate_PC,
                            candidate_label,
                            candidate_reg,
                            dataset.input_size,
                            istrain=False)

                        candidate_PCs_torch = candidate_PCs.unsqueeze(0).cuda()

                        # AGGREGATION: IO vs ONLY0 vs ONLYI vs ALL
                        if "firstandprevious".upper() in shape_aggregation.upper():
                            model_PC = utils.getModel(
                                [PCs[0], PCs[i - 1]],
                                [results_BBs[0], results_BBs[i - 1]],
                                offset=dataset.offset_BB,
                                scale=dataset.scale_BB)
                        elif "first".upper() in shape_aggregation.upper():
                            model_PC = utils.getModel([PCs[0]], [results_BBs[0]],
                                                      offset=dataset.offset_BB,
                                                      scale=dataset.scale_BB)
                        elif "previous".upper() in shape_aggregation.upper():
                            model_PC = utils.getModel([PCs[i - 1]],
                                                      [results_BBs[i - 1]],
                                                      offset=dataset.offset_BB,
                                                      scale=dataset.scale_BB)
                        else:  # "all" and default
                            model_PC = utils.getModel(PCs[:i], results_BBs,
                                                      offset=dataset.offset_BB,
                                                      scale=dataset.scale_BB)

                        model_PC_torch = utils.regularizePC(
                            model_PC, dataset.input_size,
                            istrain=False).unsqueeze(0)
                        model_PC_torch = Variable(model_PC_torch,
                                                  requires_grad=False).cuda()
                        candidate_PCs_torch = Variable(candidate_PCs_torch,
                                                       requires_grad=False).cuda()

                        estimation_cla, estimation_reg, estimation_box, center_xyz = model(
                            model_PC_torch, candidate_PCs_torch)

                        estimation_boxs_cpu = estimation_box.squeeze(0).detach().cpu().numpy()
                        # pick the proposal with the highest confidence (column 4)
                        box_idx = estimation_boxs_cpu[:, 4].argmax()
                        estimation_box_cpu = estimation_boxs_cpu[box_idx, 0:4]

                        box = utils.getOffsetBB(ref_BB, estimation_box_cpu)
                        results_BBs.append(box)

                    # estimate overlap/accuracy for the current sample
                    this_overlap = estimateOverlap(BBs[i], results_BBs[-1],
                                                   dim=IoU_Space)
                    this_accuracy = estimateAccuracy(BBs[i], results_BBs[-1],
                                                     dim=IoU_Space)

                    Success_main.add_overlap(this_overlap)
                    Precision_main.add_accuracy(this_accuracy)
                    Success_batch.add_overlap(this_overlap)
                    Precision_batch.add_accuracy(this_accuracy)

                    # measure elapsed time
                    batch_time.update(time.time() - end)
                    end = time.time()

                    t.update(1)

                    if Success_main.count >= max_iter and max_iter >= 0:
                        return Success_main.average, Precision_main.average

                t.set_description('Test {}: '.format(epoch) +
                                  'Time {:.3f}s '.format(batch_time.avg) +
                                  '(it:{:.3f}s) '.format(batch_time.val) +
                                  'Data:{:.3f}s '.format(data_time.avg) +
                                  '(it:{:.3f}s), '.format(data_time.val) +
                                  'Succ/Prec:' +
                                  '{:.1f}/'.format(Success_main.average) +
                                  '{:.1f}'.format(Precision_main.average))
                logging.info('batch {} '.format(batch_num) + 'Succ/Prec:' +
                             '{:.1f}/'.format(Success_batch.average) +
                             '{:.1f}'.format(Precision_batch.average))
                Success_batch.reset()
                Precision_batch.reset()

    return Success_main.average, Precision_main.average
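# A minimal usage sketch for the regression-based test() above, assuming a
# KITTI-style tracklet loader and a trained network; `test_loader` and
# `tracker` are hypothetical placeholder names:
#
#   Succ, Prec = test(test_loader,
#                     tracker,
#                     epoch=0,
#                     shape_aggregation="firstandprevious",
#                     reference_BB="previous_result",
#                     IoU_Space=3)
#   logging.info('Final Succ/Prec: {:.1f}/{:.1f}'.format(Succ, Prec))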
def getScoreHingeIoU(a, b):
    # Hinge on the IoU: candidates overlapping less than 0.5 score zero.
    score = estimateOverlap(a, b)
    if score < 0.5:
        score = 0.0
    return torch.tensor([score])
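# Behavior sketch for getScoreHingeIoU, assuming estimateOverlap returns an
# IoU in [0, 1]: overlaps of at least 0.5 pass through unchanged, anything
# below is clamped to zero, so weakly overlapping candidates act as pure
# negatives when this score is used as a training target.
#
#   getScoreHingeIoU(gt_box, close_box)  # IoU 0.7 -> tensor([0.7000])
#   getScoreHingeIoU(gt_box, far_box)    # IoU 0.3 -> tensor([0.])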
def test(loader,
         model,
         model_name="dummy_model",
         epoch=-1,
         shape_aggregation="",
         search_space="",
         number_candidate=125,
         reference_BB="",
         model_fusion="pointcloud",
         max_iter=-1,
         IoU_Space=3,
         DetailedMetrics=False):
    batch_time = AverageMeter()
    data_time = AverageMeter()

    Success_main = Success()
    Precision_main = Precision()
    Accuracy_Completeness_main = Accuracy_Completeness()

    # index 0 accumulates fully visible samples, index 1 occluded samples
    Precision_occluded = [Precision(), Precision()]
    Success_occluded = [Success(), Success()]

    # index 0 accumulates static samples, index 1 dynamic samples
    Precision_dynamic = [Precision(), Precision()]
    Success_dynamic = [Success(), Success()]

    # SEARCH SPACE INIT
    if "Kalman".upper() in search_space.upper():
        search_space_sampler = KalmanFiltering()
    elif "Particle".upper() in search_space.upper():
        search_space_sampler = ParticleFiltering()
    elif "GMM".upper() in search_space.upper():
        # e.g. "GMM4" -> GaussianMixtureModel with 4 components
        search_space_sampler = GaussianMixtureModel(n_comp=int(search_space[3:]))
    else:
        search_space_sampler = ExhaustiveSearch()

    # switch to evaluate mode
    model.eval()
    end = time.time()

    dataset = loader.dataset

    with tqdm(enumerate(loader), total=len(loader.dataset.list_of_anno),
              ncols=220) as t:
        for batch in loader:
            # measure data loading time
            data_time.update(time.time() - end)
            for PCs, BBs, list_of_anno in batch:  # one tracklet
                search_space_sampler.reset()
                results_BBs = []
                results_scores = []
                results_latents = []

                for i, _ in enumerate(PCs):
                    this_anno = list_of_anno[i]
                    this_BB = BBs[i]
                    this_PC = PCs[i]

                    # IS THE POINT CLOUD OCCLUDED?
                    occluded = this_anno["occluded"]
                    if occluded == 0:  # fully visible
                        occluded = 0
                    elif occluded in (1, 2):  # partially or fully occluded
                        occluded = 1
                    else:
                        occluded = -1

                    # INITIAL FRAME
                    if i == 0:
                        box = BBs[i]
                        model_PC = utils.getModel([this_PC], [this_BB],
                                                  offset=dataset.offset_BB,
                                                  scale=dataset.scale_BB)
                        if "latent".upper() in model_fusion.upper():
                            this_latent = model.AE.encode(
                                utils.regularizePC(model_PC, model).cuda())[0]
                        score = 1.0
                        candidate_BBs = []
                        dynamic = -1

                    else:
                        # previous_PC = PCs[i - 1]
                        previous_BB = BBs[i - 1]
                        # previous_anno = list_of_anno[i - 1]

                        # IS THE SAMPLE DYNAMIC?
                        if np.linalg.norm(this_BB.center - previous_BB.center) > 0.709:  # for complete set
                            dynamic = 1
                        else:
                            dynamic = 0

                        # DEFINE REFERENCE BB
                        if "previous_result".upper() in reference_BB.upper():
                            ref_BB = results_BBs[-1]
                        elif "previous_gt".upper() in reference_BB.upper():
                            ref_BB = previous_BB
                        elif "current_gt".upper() in reference_BB.upper():
                            ref_BB = this_BB

                        # note: this shadows the `search_space` string argument
                        search_space = search_space_sampler.sample(number_candidate)
                        candidate_BBs = utils.generate_boxes(
                            ref_BB, search_space=search_space)

                        candidate_PCs = [
                            utils.cropAndCenterPC(this_PC,
                                                  box,
                                                  offset=dataset.offset_BB,
                                                  scale=dataset.scale_BB)
                            for box in candidate_BBs
                        ]
                        candidate_PCs_reg = [
                            utils.regularizePC(PC, model) for PC in candidate_PCs
                        ]
                        candidate_PCs_torch = torch.cat(candidate_PCs_reg,
                                                        dim=0).cuda()

                        # DATA FUSION: PC vs LATENT
                        if "latent".upper() in model_fusion.upper():
                            candidate_PCs_encoded = model.AE.encode(candidate_PCs_torch)
                            model_PC_encoded = torch.stack(results_latents)  # stack all latent vectors

                            # AGGREGATION: IO vs ONLY0 vs ONLYI vs AVG vs MEDIAN vs MAX
                            if "firstandprevious".upper() in shape_aggregation.upper():
                                model_PC_encoded = (model_PC_encoded[0] +
                                                    model_PC_encoded[i - 1]) / 2
                            elif "first".upper() in shape_aggregation.upper():
                                model_PC_encoded = model_PC_encoded[0]
                            elif "previous".upper() in shape_aggregation.upper():
                                model_PC_encoded = model_PC_encoded[i - 1]
                            elif "MEDIAN".upper() in shape_aggregation.upper():
                                model_PC_encoded = torch.median(model_PC_encoded, 0)[0]
                            elif "MAX".upper() in shape_aggregation.upper():
                                model_PC_encoded = torch.max(model_PC_encoded, 0)[0]
                            else:  # "AVG" and default
                                model_PC_encoded = torch.mean(model_PC_encoded, 0)

                            # repeat model_PC_encoded to match the candidate batch size
                            repeat_shape = np.ones(len(candidate_PCs_encoded.shape),
                                                   dtype=np.int32)
                            repeat_shape[0] = len(candidate_PCs_encoded)
                            model_PC_encoded = model_PC_encoded.repeat(
                                tuple(repeat_shape)).cuda()

                            # TODO: remove torch dependency -> functional
                            # Y_AE = model.AE.forward(prev_PC)
                            output = F.cosine_similarity(candidate_PCs_encoded,
                                                         model_PC_encoded,
                                                         dim=1)
                            scores = output.detach().cpu().numpy()

                        elif "pointcloud".upper() in model_fusion.upper():
                            # AGGREGATION: IO vs ONLY0 vs ONLYI vs ALL
                            if "firstandprevious".upper() in shape_aggregation.upper():
                                model_PC = utils.getModel(
                                    [PCs[0], PCs[i - 1]],
                                    [results_BBs[0], results_BBs[i - 1]],
                                    offset=dataset.offset_BB,
                                    scale=dataset.scale_BB)
                            elif "first".upper() in shape_aggregation.upper():
                                model_PC = utils.getModel(
                                    [PCs[0]], [results_BBs[0]],
                                    offset=dataset.offset_BB,
                                    scale=dataset.scale_BB)
                            elif "previous".upper() in shape_aggregation.upper():
                                model_PC = utils.getModel(
                                    [PCs[i - 1]], [results_BBs[i - 1]],
                                    offset=dataset.offset_BB,
                                    scale=dataset.scale_BB)
                            else:  # "all" and default
                                model_PC = utils.getModel(
                                    PCs[:i], results_BBs,
                                    offset=dataset.offset_BB,
                                    scale=dataset.scale_BB)

                            repeat_shape = np.ones(len(candidate_PCs_torch.shape),
                                                   dtype=np.int32)
                            repeat_shape[0] = len(candidate_PCs_torch)
                            model_PC_encoded = utils.regularizePC(
                                model_PC, model).repeat(tuple(repeat_shape)).cuda()

                            output, decoded_PC = model(candidate_PCs_torch,
                                                       model_PC_encoded)
                            scores = output.detach().cpu().numpy()

                        elif "space".upper() in model_fusion.upper():
                            scores = np.array([
                                utils.distanceBB_Gaussian(bb, this_BB)
                                for bb in candidate_BBs
                            ])

                        search_space_sampler.addData(data=search_space,
                                                     score=scores.T)

                        # keep the best-scoring candidate as the tracking result
                        idx = np.argmax(scores)
                        score = scores[idx]
                        box = candidate_BBs[idx]
                        if "latent".upper() in model_fusion.upper():
                            this_latent = candidate_PCs_encoded[idx]

                    if DetailedMetrics:
                        # Construct the GT model from up to 10 frames around frame i
                        gt_model_PC_start_idx = max(0, i - 10)
                        gt_model_PC_end_idx = min(i + 10, len(PCs))
                        gt_model_PC = utils.getModel(
                            PCs[gt_model_PC_start_idx:gt_model_PC_end_idx],
                            BBs[gt_model_PC_start_idx:gt_model_PC_end_idx],
                            offset=dataset.offset_BB,
                            scale=dataset.scale_BB)

                        if gt_model_PC.points.shape[1] > 0:
                            gt_model_PC = gt_model_PC.convertToPytorch().float().unsqueeze(2).permute(2, 0, 1)
                            gt_candidate_PC = utils.regularizePC(
                                utils.cropAndCenterPC(this_PC,
                                                      this_BB,
                                                      offset=dataset.offset_BB,
                                                      scale=dataset.scale_BB),
                                model).cuda()
                            decoded_PC = model.AE.decode(
                                model.AE.encode(gt_candidate_PC)).detach().cpu()
                            Accuracy_Completeness_main.update(decoded_PC, gt_model_PC)

                    results_BBs.append(box)
                    results_scores.append(score)
                    if "latent".upper() in model_fusion.upper():
                        results_latents.append(this_latent.detach().cpu())

                    # estimate overlap/accuracy for the current sample
                    this_overlap = estimateOverlap(this_BB, box, dim=IoU_Space)
                    this_accuracy = estimateAccuracy(this_BB, box, dim=IoU_Space)

                    Success_main.add_overlap(this_overlap)
                    Precision_main.add_accuracy(this_accuracy)

                    if dynamic >= 0:
                        Success_dynamic[dynamic].add_overlap(this_overlap)
                        Precision_dynamic[dynamic].add_accuracy(this_accuracy)

                    if occluded >= 0:
                        Success_occluded[occluded].add_overlap(this_overlap)
                        Precision_occluded[occluded].add_accuracy(this_accuracy)

                    # measure elapsed time
                    batch_time.update(time.time() - end)
                    end = time.time()

                    t.update(1)

                    if Success_main.count >= max_iter and max_iter >= 0:
                        return Success_main.average, Precision_main.average

                t.set_description(f'Test {epoch}: '
                                  f'Time {batch_time.avg:.3f}s '
                                  f'(it:{batch_time.val:.3f}s) '
                                  f'Data:{data_time.avg:.3f}s '
                                  f'(it:{data_time.val:.3f}s), '
                                  f'Succ/Prec:'
                                  f'{Success_main.average:.1f}/'
                                  f'{Precision_main.average:.1f}')

    if DetailedMetrics:
        logging.info(f"Succ/Prec fully visible({Success_occluded[0].count}):")
        logging.info(f"{Success_occluded[0].average:.1f}/{Precision_occluded[0].average:.1f}")
        logging.info(f"Succ/Prec occluded({Success_occluded[1].count}):")
        logging.info(f"{Success_occluded[1].average:.1f}/{Precision_occluded[1].average:.1f}")
        logging.info(f"Succ/Prec static({Success_dynamic[0].count}):")
        logging.info(f"{Success_dynamic[0].average:.1f}/{Precision_dynamic[0].average:.1f}")
        logging.info(f"Succ/Prec dynamic({Success_dynamic[1].count}):")
        logging.info(f"{Success_dynamic[1].average:.1f}/{Precision_dynamic[1].average:.1f}")
        logging.info(f"Acc/Comp ({Accuracy_Completeness_main.count}):")
        logging.info(f"{Accuracy_Completeness_main.average[0]:.4f}/{Accuracy_Completeness_main.average[1]:.4f}")

    return Success_main.average, Precision_main.average
def getScoreIoU(a, b):
    # Raw IoU between the two boxes, returned as a 1-element tensor.
    score = estimateOverlap(a, b)
    return torch.tensor([score])
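# The metric accumulators shared by both test() variants (AverageMeter,
# Success, Precision) are repo-local. A rough sketch of the interface assumed
# above, for orientation only; the real implementations live in the project's
# metrics module:
#
#   class Success:
#       def add_overlap(self, overlap): ...  # record one per-frame IoU
#       def reset(self): ...
#       count = ...                          # number of recorded frames
#       average = ...                        # aggregate score reported in percent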