def evaluate(self, epoch):
    """Run one validation pass of the joint descriptor/detector model.

    Args:
        epoch: current epoch index (used only for log messages).

    Returns:
        dict with averaged 'desc_loss', 'det_loss', 'accuracy', 'd_pos'
        and 'd_neg' over at most ``self.val_max_iter`` validation batches.
    """
    self.model.eval()
    data_timer, model_timer = Timer(), Timer()
    desc_loss_meter = AverageMeter()
    det_loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    d_pos_meter = AverageMeter()
    d_neg_meter = AverageMeter()
    num_iter = int(len(self.val_loader.dataset) // self.val_loader.batch_size)
    num_iter = min(self.val_max_iter, num_iter)
    test_loader_iter = iter(self.val_loader)
    # Evaluation needs no autograd graph; no_grad saves memory and time.
    with torch.no_grad():
        for batch_idx in range(num_iter):
            data_timer.tic()
            # next(it) instead of it.next(): .next() is not the Python-3
            # iterator protocol and was removed from DataLoader iterators.
            inputs = next(test_loader_iter)
            # Move every input tensor (or list of tensors) to the device.
            for k, v in inputs.items():
                if isinstance(v, list):
                    inputs[k] = [item.to(self.device) for item in v]
                else:
                    inputs[k] = v.to(self.device)
            data_timer.toc()
            model_timer.tic()
            features, scores = self.model(inputs)
            # 'corr' holds (anchor_idx, positive_idx) pairs; positive indices
            # are offset by the first cloud's length because both point clouds
            # are stacked into a single batch.
            anc_idx = inputs["corr"][:, 0].long()
            pos_idx = inputs["corr"][:, 1].long() + inputs['stack_lengths'][0][0]
            anc_features = features[anc_idx]
            pos_features = features[pos_idx]
            anc_scores = scores[anc_idx]
            pos_scores = scores[pos_idx]
            desc_loss, acc, d_pos, d_neg, _, dist = self.evaluation_metric['desc_loss'](
                anc_features, pos_features, inputs['dist_keypts'])
            det_loss = self.evaluation_metric['det_loss'](dist, anc_scores, pos_scores)
            # NOTE: the original computed a weighted total loss here but never
            # used it during evaluation; the dead computation was removed.
            d_pos = np.mean(d_pos)
            d_neg = np.mean(d_neg)
            model_timer.toc()
            desc_loss_meter.update(float(desc_loss))
            det_loss_meter.update(float(det_loss))
            d_pos_meter.update(float(d_pos))
            d_neg_meter.update(float(d_neg))
            acc_meter.update(float(acc))
            if (batch_idx + 1) % 100 == 0 and self.verbose:
                print(f"Eval epoch: {epoch+1} [{batch_idx+1:4d}/{num_iter}] "
                      f"desc loss: {desc_loss_meter.avg:.2f} "
                      f"det loss: {det_loss_meter.avg:.2f} "
                      f"acc: {acc_meter.avg:.2f} "
                      f"d_pos: {d_pos_meter.avg:.2f} "
                      f"d_neg: {d_neg_meter.avg:.2f} "
                      f"data time: {data_timer.avg:.2f}s "
                      f"model time: {model_timer.avg:.2f}s")
    # Restore training mode for the caller.
    self.model.train()
    res = {
        'desc_loss': desc_loss_meter.avg,
        'det_loss': det_loss_meter.avg,
        'accuracy': acc_meter.avg,
        'd_pos': d_pos_meter.avg,
        'd_neg': d_neg_meter.avg,
    }
    print(f'Evaluation: Epoch {epoch}: Desc Loss {res["desc_loss"]}, Det Loss {res["det_loss"]}, Accuracy {res["accuracy"]}')
    return res
def train_epoch(self, epoch):
    """Train the part-segmentation model for one epoch.

    Runs a full pass over ``self.train_loader``, logs running averages,
    and appends epoch statistics to ``self.train_hist``.

    Args:
        epoch: current epoch index (used only for log messages).
    """
    data_timer, model_timer = Timer(), Timer()
    loss_meter, acc_meter, iou_meter = AverageMeter(), AverageMeter(), AverageMeter()
    num_iter = int(len(self.train_loader.dataset) // self.train_loader.batch_size)
    train_loader_iter = iter(self.train_loader)
    for batch_idx in range(num_iter):
        data_timer.tic()
        # next(it) instead of it.next(): .next() is not the Python-3
        # iterator protocol and was removed from DataLoader iterators.
        inputs = next(train_loader_iter)
        # Move every input tensor (or list of tensors) to the device.
        for k, v in inputs.items():
            if isinstance(v, list):
                inputs[k] = [item.to(self.device) for item in v]
            else:
                inputs[k] = v.to(self.device)
        data_timer.toc()
        model_timer.tic()
        # forward
        self.optimizer.zero_grad()
        predict = self.model(inputs)
        labels = inputs['labels'].long().squeeze()
        loss = self.evaluate_metric(predict, labels)
        acc = calculate_acc(predict, labels)
        part_iou = calculate_iou(predict,
                                 labels,
                                 stack_lengths=inputs['stack_lengths'][0],
                                 n_parts=self.config.num_classes)
        iou = np.mean(part_iou)
        # backward
        loss.backward()
        self.optimizer.step()
        model_timer.toc()
        loss_meter.update(float(loss))
        iou_meter.update(float(iou))
        acc_meter.update(float(acc))
        # `% 1` keeps the original behavior of logging every iteration.
        if (batch_idx + 1) % 1 == 0 and self.verbose:
            print(f"Epoch: {epoch+1} [{batch_idx+1:4d}/{num_iter}] "
                  f"loss: {loss_meter.avg:.2f} "
                  f"iou: {iou_meter.avg:.2f} "
                  f"data time: {data_timer.avg:.2f}s "
                  f"model time: {model_timer.avg:.2f}s")
    # finish one epoch
    epoch_time = model_timer.total_time + data_timer.total_time
    self.train_hist['per_epoch_time'].append(epoch_time)
    self.train_hist['loss'].append(loss_meter.avg)
    self.train_hist['accuracy'].append(acc_meter.avg)
    self.train_hist['iou'].append(iou_meter.avg)
    print(
        f'Epoch {epoch+1}: Loss : {loss_meter.avg:.2f}, Accuracy: {acc_meter.avg:.2f}, IOU: {iou_meter.avg:.2f}, time {epoch_time:.2f}s'
    )
def evaluate(self, epoch):
    """Evaluate the part-segmentation model on the test set.

    Fixes vs. the original: the iteration count and the iterator were
    taken from ``self.train_loader`` even though this is the test pass —
    both now use ``self.test_loader``; the model is switched to eval mode
    (the original only restored ``.train()`` at the end, implying eval
    was intended), and the pass runs under ``torch.no_grad()``.

    Args:
        epoch: current epoch index (used only for log messages).

    Returns:
        dict with averaged 'iou', 'loss' and 'accuracy'.
    """
    self.model.eval()
    data_timer, model_timer = Timer(), Timer()
    loss_meter, acc_meter, iou_meter = AverageMeter(), AverageMeter(), AverageMeter()
    num_iter = int(len(self.test_loader.dataset) // self.test_loader.batch_size)
    test_loader_iter = iter(self.test_loader)
    with torch.no_grad():
        for batch_idx in range(num_iter):
            data_timer.tic()
            inputs = next(test_loader_iter)
            # Move every input tensor (or list of tensors) to the device.
            for k, v in inputs.items():
                if isinstance(v, list):
                    inputs[k] = [item.to(self.device) for item in v]
                else:
                    inputs[k] = v.to(self.device)
            data_timer.toc()
            model_timer.tic()
            predict = self.model(inputs)
            labels = inputs['labels'].long().squeeze()
            loss = self.evaluate_metric(predict, labels)
            acc = calculate_acc(predict, labels)
            part_iou = calculate_iou(predict,
                                     labels,
                                     stack_lengths=inputs['stack_lengths'][0],
                                     n_parts=self.config.num_classes)
            iou = np.mean(part_iou)
            model_timer.toc()
            loss_meter.update(float(loss))
            iou_meter.update(float(iou))
            acc_meter.update(float(acc))
            # `% 1` keeps the original behavior of logging every iteration.
            if (batch_idx + 1) % 1 == 0 and self.verbose:
                print(f"Eval epoch: {epoch+1} [{batch_idx+1:4d}/{num_iter}] "
                      f"loss: {loss_meter.avg:.2f} "
                      f"iou: {iou_meter.avg:.2f} "
                      f"data time: {data_timer.avg:.2f}s "
                      f"model time: {model_timer.avg:.2f}s")
    # Restore training mode for the caller.
    self.model.train()
    res = {
        'iou': iou_meter.avg,
        'loss': loss_meter.avg,
        'accuracy': acc_meter.avg
    }
    return res
def evaluate(self, epoch):
    """Evaluate the classification model on the test set.

    Args:
        epoch: current epoch index (used only for log messages).

    Returns:
        dict with averaged 'loss' and 'accuracy'.
    """
    self.model.eval()
    data_timer, model_timer = Timer(), Timer()
    loss_meter, acc_meter = AverageMeter(), AverageMeter()
    # Floor division for consistency with the other eval/train loops.
    num_iter = int(len(self.test_loader.dataset) // self.test_loader.batch_size)
    test_loader_iter = iter(self.test_loader)
    # Evaluation needs no autograd graph; no_grad saves memory and time.
    with torch.no_grad():
        for batch_idx in range(num_iter):
            data_timer.tic()
            # next(it) instead of it.next(): .next() is not the Python-3
            # iterator protocol and was removed from DataLoader iterators.
            inputs = next(test_loader_iter)
            # Move every input tensor (or list of tensors) to the device.
            for k, v in inputs.items():
                if isinstance(v, list):
                    inputs[k] = [item.to(self.device) for item in v]
                else:
                    inputs[k] = v.to(self.device)
            data_timer.toc()
            model_timer.tic()
            predict = self.model(inputs)
            labels = inputs['labels'].long()
            loss = self.evaluate_metric(predict, labels)
            # Accuracy in percent: fraction of argmax predictions matching labels.
            acc = torch.sum(
                torch.max(predict, dim=1)[1].int() == labels.int()) * 100 / predict.shape[0]
            model_timer.toc()
            loss_meter.update(float(loss))
            acc_meter.update(float(acc))
            # `% 1` keeps the original behavior of logging every iteration.
            if (batch_idx + 1) % 1 == 0 and self.verbose:
                print(f"Eval epoch {epoch+1}: [{batch_idx+1:3d}/{num_iter}] "
                      f"loss: {loss_meter.avg:.2f} "
                      f"acc: {acc_meter.avg:.2f} "
                      f"data time: {data_timer.avg:.2f}s "
                      f"model time: {model_timer.avg:.2f}s")
    # Restore training mode for the caller.
    self.model.train()
    res = {'loss': loss_meter.avg, 'accuracy': acc_meter.avg}
    return res
def train_epoch(self, epoch):
    """Train the joint descriptor/detector model for one epoch.

    Runs at most ``self.training_max_iter`` batches, skips the optimizer
    step whenever any gradient is non-finite, and logs running averages
    to stdout and to ``self.writer`` every 100 iterations.

    Args:
        epoch: current epoch index (1-based in the tensorboard step math).
    """
    data_timer, model_timer = Timer(), Timer()
    desc_loss_meter = AverageMeter()
    det_loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    d_pos_meter = AverageMeter()
    d_neg_meter = AverageMeter()
    num_iter = int(len(self.train_loader.dataset) // self.train_loader.batch_size)
    num_iter = min(self.training_max_iter, num_iter)
    train_loader_iter = iter(self.train_loader)
    for batch_idx in range(num_iter):
        data_timer.tic()
        # next(it) instead of it.next(): .next() is not the Python-3
        # iterator protocol and was removed from DataLoader iterators.
        inputs = next(train_loader_iter)
        # Move every input tensor (or list of tensors) to the device.
        for k, v in inputs.items():
            if isinstance(v, list):
                inputs[k] = [item.to(self.device) for item in v]
            else:
                inputs[k] = v.to(self.device)
        data_timer.toc()
        model_timer.tic()
        # forward
        self.optimizer.zero_grad()
        features, scores = self.model(inputs)
        # 'corr' holds (anchor_idx, positive_idx) pairs; positive indices
        # are offset by the first cloud's length because both point clouds
        # are stacked into a single batch.
        anc_idx = inputs["corr"][:, 0].long()
        pos_idx = inputs["corr"][:, 1].long() + inputs['stack_lengths'][0][0]
        anc_features = features[anc_idx]
        pos_features = features[pos_idx]
        anc_scores = scores[anc_idx]
        pos_scores = scores[pos_idx]
        desc_loss, acc, d_pos, d_neg, _, dist = self.evaluation_metric["desc_loss"](
            anc_features, pos_features, inputs['dist_keypts'])
        det_loss = self.evaluation_metric['det_loss'](dist, anc_scores, pos_scores)
        loss = desc_loss * self.metric_weight['desc_loss'] + det_loss * self.metric_weight['det_loss']
        d_pos = np.mean(d_pos)
        d_neg = np.mean(d_neg)
        # backward
        loss.backward()
        # Skip the update if any gradient contains NaN/Inf; equivalent to
        # the original (1 - isfinite).sum() > 0 check, written directly.
        do_step = True
        for param in self.model.parameters():
            if param.grad is not None and not torch.isfinite(param.grad).all():
                do_step = False
                break
        if do_step:
            self.optimizer.step()
        model_timer.toc()
        desc_loss_meter.update(float(desc_loss))
        det_loss_meter.update(float(det_loss))
        d_pos_meter.update(float(d_pos))
        d_neg_meter.update(float(d_neg))
        acc_meter.update(float(acc))
        if (batch_idx + 1) % 100 == 0 and self.verbose:
            # Global step assumes `epoch` starts at 1 — TODO confirm caller.
            curr_iter = num_iter * (epoch - 1) + batch_idx
            self.writer.add_scalar('train/Desc_Loss', float(desc_loss_meter.avg), curr_iter)
            self.writer.add_scalar('train/Det_Loss', float(det_loss_meter.avg), curr_iter)
            self.writer.add_scalar('train/D_pos', float(d_pos_meter.avg), curr_iter)
            self.writer.add_scalar('train/D_neg', float(d_neg_meter.avg), curr_iter)
            self.writer.add_scalar('train/Accuracy', float(acc_meter.avg), curr_iter)
            print(f"Epoch: {epoch} [{batch_idx+1:4d}/{num_iter}] "
                  f"desc loss: {desc_loss_meter.avg:.2f} "
                  f"det loss: {det_loss_meter.avg:.2f} "
                  f"acc: {acc_meter.avg:.2f} "
                  f"d_pos: {d_pos_meter.avg:.2f} "
                  f"d_neg: {d_neg_meter.avg:.2f} "
                  f"data time: {data_timer.avg:.2f}s "
                  f"model time: {model_timer.avg:.2f}s")
    # finish one epoch
    epoch_time = model_timer.total_time + data_timer.total_time
    print(f'Epoch {epoch}: Desc Loss: {desc_loss_meter.avg:.2f}, Det Loss : {det_loss_meter.avg:.2f}, Accuracy: {acc_meter.avg:.2f}, D_pos: {d_pos_meter.avg:.2f}, D_neg: {d_neg_meter.avg:.2f}, time {epoch_time:.2f}s')
def test_kitti(model, dataset, config):
    """Evaluate D3Feat registration on KITTI pairs with RANSAC.

    For each test pair: extracts keypoints (top-scored or random),
    registers the two clouds with Open3D feature-based RANSAC (results
    cached as .npz under ``icp_save_path``), and accumulates RTE/RRE and
    success-rate statistics against the ground-truth transform.

    Fixes vs. the original: removed the leftover ``pdb.set_trace()``
    debugger trap; restored the ``feat_timer`` tic/toc (its average was
    logged but the calls were commented out, so it always read 0);
    ``os.makedirs(..., exist_ok=True)`` instead of a bare ``mkdir``.
    """
    import sys
    use_random_points = False
    icp_save_path = "d3feat_output"
    if use_random_points:
        num_keypts = 5000
    else:
        num_keypts = 250
    os.makedirs(icp_save_path, exist_ok=True)
    ch = logging.StreamHandler(sys.stdout)
    logging.getLogger().setLevel(logging.INFO)
    logging.basicConfig(format='%(asctime)s %(message)s',
                        datefmt='%m/%d %H:%M:%S',
                        handlers=[ch])
    success_meter, loss_meter, rte_meter, rre_meter = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    feat_timer, reg_timer = Timer(), Timer()
    for i in range(dataset.length):
        feat_timer.tic()
        ops = [
            model.anchor_inputs, model.out_features, model.out_scores,
            model.anc_id, model.pos_id, model.accuracy
        ]
        # NOTE(review): `self` is not a parameter of this function — this call
        # only works if the function lives inside a class with a TF session;
        # confirm against the enclosing context.
        [inputs, features, scores, anc_id, pos_id, accuracy] = self.sess.run(ops, {model.dropout_prob: 1.0})
        feat_timer.toc()
        stack_lengths = inputs['stack_lengths']
        first_pcd_indices = np.arange(stack_lengths[0])
        second_pcd_indices = np.arange(stack_lengths[1]) + stack_lengths[0]
        if use_random_points:
            # Sample keypoints uniformly at random from each cloud.
            anc_keypoints_id = np.random.choice(stack_lengths[0], num_keypts)
            pos_keypoints_id = np.random.choice(stack_lengths[1], num_keypts) + stack_lengths[0]
            anc_points = inputs['points'][0][anc_keypoints_id]
            pos_points = inputs['points'][0][pos_keypoints_id]
            anc_features = features[anc_keypoints_id]
            pos_features = features[pos_keypoints_id]
            anc_scores = scores[anc_keypoints_id]
            pos_scores = scores[pos_keypoints_id]
        else:
            # Take the num_keypts highest-scoring points of each cloud.
            scores_anc_pcd = scores[first_pcd_indices]
            scores_pos_pcd = scores[second_pcd_indices]
            anc_keypoints_id = np.argsort(scores_anc_pcd, axis=0)[-num_keypts:].squeeze()
            pos_keypoints_id = np.argsort(scores_pos_pcd, axis=0)[-num_keypts:].squeeze() + stack_lengths[0]
            anc_points = inputs['points'][0][anc_keypoints_id]
            anc_features = features[anc_keypoints_id]
            anc_scores = scores[anc_keypoints_id]
            pos_points = inputs['points'][0][pos_keypoints_id]
            pos_features = features[pos_keypoints_id]
            pos_scores = scores[pos_keypoints_id]
        pcd0 = make_open3d_point_cloud(anc_points)
        pcd1 = make_open3d_point_cloud(pos_points)
        # 32 is presumably the descriptor dimensionality — TODO confirm.
        feat0 = make_open3d_feature(anc_features, 32, anc_features.shape[0])
        feat1 = make_open3d_feature(pos_features, 32, pos_features.shape[0])
        reg_timer.tic()
        filename = anc_id.decode("utf-8") + "-" + pos_id.decode("utf-8").split("@")[-1] + '.npz'
        if os.path.exists(join(icp_save_path, filename)):
            # Reuse a cached RANSAC result for this pair.
            data = np.load(join(icp_save_path, filename))
            T_ransac = data['trans']
            print(f"Read from {join(icp_save_path, filename)}")
        else:
            distance_threshold = dataset.voxel_size * 1.0
            ransac_result = open3d.registration.registration_ransac_based_on_feature_matching(
                pcd0, pcd1, feat0, feat1, distance_threshold,
                open3d.registration.TransformationEstimationPointToPoint(False), 4,
                [
                    open3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                    open3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)
                ],
                open3d.registration.RANSACConvergenceCriteria(50000, 1000))
            T_ransac = ransac_result.transformation.astype(np.float32)
            np.savez(join(icp_save_path, filename),
                     trans=T_ransac,
                     anc_pts=anc_points,
                     pos_pts=pos_points,
                     anc_scores=anc_scores,
                     pos_scores=pos_scores)
        reg_timer.toc()
        T_gth = inputs['trans']
        # Registration loss left disabled in the original (corr_dist call
        # commented out); the meter therefore averages zeros.
        loss_ransac = 0
        # Relative translation error (meters) and rotation error (radians);
        # arccos of the trace formula can yield NaN from numeric drift,
        # which is filtered below.
        rte = np.linalg.norm(T_ransac[:3, 3] - T_gth[:3, 3])
        rre = np.arccos((np.trace(T_ransac[:3, :3].transpose() @ T_gth[:3, :3]) - 1) / 2)
        if rte < 2:
            rte_meter.update(rte)
        if not np.isnan(rre) and rre < np.pi / 180 * 5:
            rre_meter.update(rre * 180 / np.pi)
        # Success = translation within 2 m AND rotation within 5 degrees.
        if rte < 2 and not np.isnan(rre) and rre < np.pi / 180 * 5:
            success_meter.update(1)
        else:
            success_meter.update(0)
            logging.info(f"{anc_id} Failed with RTE: {rte}, RRE: {rre * 180 / np.pi}")
        loss_meter.update(loss_ransac)
        if (i + 1) % 10 == 0:
            logging.info(
                f"{i+1} / {dataset.num_test}: Feat time: {feat_timer.avg}," +
                f" Reg time: {reg_timer.avg}, Loss: {loss_meter.avg}, RTE: {rte_meter.avg}," +
                f" RRE: {rre_meter.avg}, Success: {success_meter.sum} / {success_meter.count}" +
                f" ({success_meter.avg * 100} %)")
            feat_timer.reset()
            reg_timer.reset()
    logging.info(
        f"Total loss: {loss_meter.avg}, RTE: {rte_meter.avg}, var: {rte_meter.var}," +
        f" RRE: {rre_meter.avg}, var: {rre_meter.var}, Success: {success_meter.sum} " +
        f"/ {success_meter.count} ({success_meter.avg * 100} %)")
def register_one_scene(inlier_ratio_threshold, distance_threshold, save_path, return_dict, scene):
    """Compute feature-match recall and inlier statistics for one scene.

    For every fragment pair listed in the scene's ground-truth log,
    matches D3Feat descriptors, transforms the matched target keypoints
    by the ground-truth pose, and counts matches within
    ``distance_threshold`` as inliers. A pair counts as recalled when its
    inlier ratio exceeds ``inlier_ratio_threshold``.

    Relies on module-level ``config`` and ``args`` — presumably set by the
    script's argument parsing; verify against the enclosing module.

    Args:
        inlier_ratio_threshold: minimum inlier ratio for a recalled pair.
        distance_threshold: max distance (same units as keypoints) for an inlier.
        save_path: root directory holding keypoints/descriptors/scores.
        return_dict: shared dict (e.g. multiprocessing.Manager) to store results.
        scene: scene name used to locate files and ground truth.

    Returns:
        (recall_percent, avg_inlier_num, avg_inlier_ratio)
    """
    gt_matches = 0
    pred_matches = 0
    keyptspath = f"{save_path}/keypoints/{scene}"
    descpath = f"{save_path}/descriptors/{scene}"
    scorepath = f"{save_path}/scores/{scene}"
    gtpath = f'geometric_registration/gt_result/{scene}-evaluation/'
    gtLog = loadlog(gtpath)
    inlier_num_meter, inlier_ratio_meter = AverageMeter(), AverageMeter()
    pcdpath = f"{config.root}/fragments/{scene}/"
    num_frag = len([
        filename for filename in os.listdir(pcdpath)
        if filename.endswith('ply')
    ])
    for id1 in range(num_frag):
        for id2 in range(id1 + 1, num_frag):
            cloud_bin_s = f'cloud_bin_{id1}'
            cloud_bin_t = f'cloud_bin_{id2}'
            key = f"{id1}_{id2}"
            if key not in gtLog:
                # Skip the pairs that have less than 30% overlap.
                num_inliers = 0
                inlier_ratio = 0
            else:
                source_keypts = get_keypts(keyptspath, cloud_bin_s)
                target_keypts = get_keypts(keyptspath, cloud_bin_t)
                source_desc = get_desc(descpath, cloud_bin_s, 'D3Feat')
                target_desc = get_desc(descpath, cloud_bin_t, 'D3Feat')
                source_score = get_scores(scorepath, cloud_bin_s, 'D3Feat').squeeze()
                target_score = get_scores(scorepath, cloud_bin_t, 'D3Feat').squeeze()
                # Guard against NaNs in stored descriptors.
                source_desc = np.nan_to_num(source_desc)
                target_desc = np.nan_to_num(target_desc)
                # Select keypoints: random subset or the top-scored ones.
                if args.random_points:
                    source_indices = np.random.choice(
                        range(source_keypts.shape[0]), args.num_points)
                    target_indices = np.random.choice(
                        range(target_keypts.shape[0]), args.num_points)
                else:
                    source_indices = np.argsort(source_score)[-args.num_points:]
                    target_indices = np.argsort(target_score)[-args.num_points:]
                source_keypts = source_keypts[source_indices, :]
                source_desc = source_desc[source_indices, :]
                target_keypts = target_keypts[target_indices, :]
                target_desc = target_desc[target_indices, :]
                corr = build_correspondence(source_desc, target_desc)
                gt_trans = gtLog[key]
                frag1 = source_keypts[corr[:, 0]]
                # Bring matched target keypoints into the source frame using
                # the ground-truth transform, then measure residuals.
                frag2_pc = o3d.geometry.PointCloud()
                frag2_pc.points = o3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
                frag2_pc.transform(gt_trans)
                frag2 = np.asarray(frag2_pc.points)
                distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
                num_inliers = np.sum(distance < distance_threshold)
                inlier_ratio = num_inliers / len(distance)
                if inlier_ratio > inlier_ratio_threshold:
                    pred_matches += 1
                gt_matches += 1
                inlier_num_meter.update(num_inliers)
                inlier_ratio_meter.update(inlier_ratio)
    # Guard: avoid ZeroDivisionError when the gt log contains no valid pair.
    recall = pred_matches * 100.0 / gt_matches if gt_matches > 0 else 0.0
    return_dict[scene] = [recall, inlier_num_meter.avg, inlier_ratio_meter.avg]
    logging.info(
        f"{scene}: Recall={recall:.2f}%, inlier ratio={inlier_ratio_meter.avg*100:.2f}%, inlier num={inlier_num_meter.avg:.2f}"
    )
    return recall, inlier_num_meter.avg, inlier_ratio_meter.avg