def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    ap_calculator_l = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh * 2,
                                   class2type_map=DATASET_CONFIG.class2type)
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        end_points = {}
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs, end_points)

        # Compute loss
        for key in batch_data_label:
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(inputs, end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT_L, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT_L)
        ap_calculator_l.step(batch_pred_map_cls, batch_gt_map_cls)

        if FLAGS.dump_results:
            dump_results(end_points, DUMP_DIR + '/result/', DATASET_CONFIG,
                         TEST_DATASET)

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' %
                   (key, stat_dict[key] / (float(batch_idx + 1))))

    # Evaluate average precision at both IoU thresholds
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('iou = 0.25, eval %s: %f' % (key, metrics_dict[key]))
    metrics_dict = ap_calculator_l.compute_metrics()
    for key in metrics_dict:
        log_string('iou = 0.5, eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    net.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)
        end_points['relation_type'] = FLAGS.relation_type
        end_points['relation_pair'] = FLAGS.relation_pair

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if FLAGS.dump_results and batch_idx == 0 and EPOCH_CNT % 10 == 0:
            MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1) for key in stat_dict},
        (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' %
                   (key, stat_dict[key] / (float(batch_idx + 1))))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    detector.eval()  # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = detector(inputs)

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = test_detector_criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1) for key in stat_dict},
        (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' %
                   (key, stat_dict[key] / (float(batch_idx + 1))))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))
    TEST_VISUALIZER.log_scalars(
        {
            'mAP': metrics_dict['mAP'],
            'AR': metrics_dict['AR']
        }, (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)

    mean_loss = stat_dict['detection_loss'] / float(batch_idx + 1)
    return mean_loss
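# Note: TEST_VISUALIZER used above is not defined in these snippets. The class
# below is a minimal, hypothetical sketch of a compatible scalar logger,
# assuming it wraps torch.utils.tensorboard.SummaryWriter; the original code
# may implement it differently.
from torch.utils.tensorboard import SummaryWriter


class Visualizer(object):
    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def log_scalars(self, scalar_dict, step):
        # Write each named scalar at the given global step.
        for name, value in scalar_dict.items():
            self.writer.add_scalar(name, value, step)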
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    ap_calculator_l = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh * 2,
                                   class2type_map=DATASET_CONFIG.class2type)
    net.eval()  # set model to eval mode (for bn and dp)

    # Record per-batch inference time
    time_file = 'time_file_%s.txt' % FLAGS.dataset
    time_file = os.path.join(DUMP_DIR, time_file)
    f = open(time_file, 'w')
    all_time = 0

    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        end_points = {}
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass (timed)
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        tic = time.time()
        with torch.no_grad():
            end_points = net(inputs, end_points)
        toc = time.time()
        t = toc - tic
        all_time += t
        f.write('batch_idx:%d, infer time:%f\n' % (batch_idx, t))
        print('Inference time: %f' % (t))

        # Compute loss
        for key in batch_data_label:
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(inputs, end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT_L, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT_L)
        ap_calculator_l.step(batch_pred_map_cls, batch_gt_map_cls)

        if FLAGS.dump_results:
            dump_results(end_points, DUMP_DIR + '/result/', DATASET_CONFIG,
                         TEST_DATASET)

    mean_time = all_time / float(batch_idx + 1)
    f.write('Batch number:%d\n' % (batch_idx + 1))
    f.write('mean infer time: %f\n' % (mean_time))
    f.close()
    print('Mean inference time: %f' % (mean_time))

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1) for key in stat_dict},
        (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' %
                   (key, stat_dict[key] / (float(batch_idx + 1))))

    # Evaluate average precision at both IoU thresholds
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))
    metrics_dict = ap_calculator_l.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
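# The driver below is a minimal, hypothetical sketch of how evaluate_one_epoch
# is typically called from a training entry point; it is not part of the
# original code. It assumes the usual surrounding globals (net, optimizer,
# MAX_EPOCH, DUMP_DIR, EPOCH_CNT, log_string) and a train_one_epoch function
# exist in the script.
import os
import torch


def train(start_epoch):
    global EPOCH_CNT
    min_loss = 1e10
    for epoch in range(start_epoch, MAX_EPOCH):
        EPOCH_CNT = epoch
        log_string('**** EPOCH %03d ****' % (epoch))
        train_one_epoch()
        if EPOCH_CNT % 10 == 9:  # evaluate every 10 epochs
            loss = evaluate_one_epoch()
            if loss < min_loss:
                min_loss = loss
                # Keep the checkpoint with the lowest evaluation loss.
                save_dict = {'epoch': epoch + 1,
                             'optimizer_state_dict': optimizer.state_dict(),
                             'model_state_dict': net.state_dict(),
                             'loss': loss}
                torch.save(save_dict,
                           os.path.join(DUMP_DIR, 'best_checkpoint.tar'))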
objectness_mask = np.greater_equal(
    objectness_score_normalized[:, 1], conf_thresh)
center_p = center[objectness_mask, :]
center_p_pcd = create_pointcloud(center_p, [0, 0, 1])  # blue

draw_list = [
    seeds_xyz_pcd, points_xyz_pcd, votes_xyz_pcd, center_p_pcd
]
# draw_list = [points_xyz_pcd]

# ---------------- visualize bounding boxes --------------------------
gt_bboxes = batch_gt_map_cls[i]
# draw_list.append(create_bbox(x[1]) for x in gt_bboxes)
for x in gt_bboxes:
    flipped_box = flip_axis_to_depth(x[1])  # flip back to world coordinates
    draw_list.append(create_bbox(flipped_box, [0, 1, 0]))  # green box - GT

pred_bbox = batch_pred_map_cls[i]
for x in pred_bbox:
    if x[2] > thresh_viz:  # objectness threshold for visualisation
        flipped_box = flip_axis_to_depth(x[1])
        draw_list.append(create_bbox(flipped_box, [1, 0, 0]))  # red box - prediction

o3d.visualization.draw_geometries(draw_list)

print('----------calculating AP and AR------------------')
metric_dict = ap_calculator.compute_metrics()
print('With threshold of %.2f 3DIoU :' % ap_iou)
for key in metric_dict:
    print('eval %s: %f' % (key, metric_dict[key]))
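# The helpers create_pointcloud and create_bbox referenced above are not shown
# in this snippet. The sketch below is one plausible Open3D-based
# implementation, assuming `box` is an (8, 3) array of box corners with
# corners 0-3 and 4-7 forming the two opposite faces; the original helpers may
# differ.
import open3d as o3d


def create_pointcloud(xyz, color):
    """Wrap an (N, 3) array as an Open3D point cloud painted a single color."""
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    pcd.paint_uniform_color(color)
    return pcd


def create_bbox(box, color):
    """Build an Open3D LineSet wireframe from 8 box corners."""
    lines = [[0, 1], [1, 2], [2, 3], [3, 0],   # bottom face
             [4, 5], [5, 6], [6, 7], [7, 4],   # top face
             [0, 4], [1, 5], [2, 6], [3, 7]]   # vertical edges
    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector(box)
    line_set.lines = o3d.utility.Vector2iVector(lines)
    line_set.colors = o3d.utility.Vector3dVector([color] * len(lines))
    return line_set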