Example #1
def evaluate_one_epoch():
    stat_dict = {}

    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    ap_calculator_l = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh * 2,
                                   class2type_map=DATASET_CONFIG.class2type)

    net.eval()  # set model to eval mode (affects batch norm and dropout)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        end_points = {}
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs, end_points)

        # Compute loss
        for key in batch_data_label:
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(inputs, end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        batch_pred_map_cls = parse_predictions(
            end_points, CONFIG_DICT_L, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT_L)
        ap_calculator_l.step(batch_pred_map_cls, batch_gt_map_cls)

        if FLAGS.dump_results:
            dump_results(end_points, DUMP_DIR + '/result/', DATASET_CONFIG,
                         TEST_DATASET)

    # Log statistics
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('iou = %.2f, eval %s: %f' % (FLAGS.ap_iou_thresh, key, metrics_dict[key]))
    metrics_dict = ap_calculator_l.compute_metrics()
    for key in metrics_dict:
        log_string('iou = %.2f, eval %s: %f' % (FLAGS.ap_iou_thresh * 2, key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
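
The function above only returns the mean loss, so a caller typically tracks the best value for checkpointing. A minimal caller sketch follows; BEST_EVAL_LOSS, save_checkpoint, optimizer and EPOCH_CNT are assumed names and not part of the example above.

# Hypothetical caller sketch; only evaluate_one_epoch() and net come from the
# example above, the remaining names are assumptions.
loss = evaluate_one_epoch()
if loss < BEST_EVAL_LOSS:
    BEST_EVAL_LOSS = loss
    save_checkpoint(net, optimizer, epoch=EPOCH_CNT, loss=loss)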
Example #2
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    net.eval()  # set model to eval mode (affects batch norm and dropout)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d' % (batch_idx))
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = net(inputs)

        end_points['relation_type'] = FLAGS.relation_type
        end_points['relation_pair'] = FLAGS.relation_pair

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        # Dump evaluation results for visualization
        if FLAGS.dump_results and batch_idx == 0 and EPOCH_CNT % 10 == 0:
            MODEL.dump_results(end_points, DUMP_DIR, DATASET_CONFIG)

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1)
         for key in stat_dict},
        (EPOCH_CNT + 1) * len(TRAIN_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))

    mean_loss = stat_dict['loss'] / float(batch_idx + 1)
    return mean_loss
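
Example #2 reads module-level globals such as EPOCH_CNT, TRAIN_DATALOADER, TEST_VISUALIZER and BATCH_SIZE. A rough sketch of the outer loop that could drive it is shown below; MAX_EPOCH and train_one_epoch() are assumptions, not taken from the example.

# Hypothetical driver loop; MAX_EPOCH and train_one_epoch() are assumed names.
for epoch in range(MAX_EPOCH):
    EPOCH_CNT = epoch
    train_one_epoch()
    mean_loss = evaluate_one_epoch()
    log_string('epoch %d eval mean loss: %f' % (epoch, mean_loss))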
Example #3
def evaluate_one_epoch():
    stat_dict = {}  # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
                                 class2type_map=DATASET_CONFIG.class2type)
    detector.eval()  # set model to eval mode (affects batch norm and dropout)
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)

        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        with torch.no_grad():
            end_points = detector(inputs)

        # Compute loss
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = test_detector_criterion(end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

    # Log statistics
    TEST_VISUALIZER.log_scalars(
        {key: stat_dict[key] / float(batch_idx + 1)
         for key in stat_dict},
        (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f' % (key, stat_dict[key] /
                                         (float(batch_idx + 1))))

    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('eval %s: %f' % (key, metrics_dict[key]))

    TEST_VISUALIZER.log_scalars(
        {
            'mAP': metrics_dict['mAP'],
            'AR': metrics_dict['AR']
        }, (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)

    mean_loss = stat_dict['detection_loss'] / float(batch_idx + 1)
    return mean_loss
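
Example #3 indexes metrics_dict['mAP'] and metrics_dict['AR'] directly, which raises a KeyError if an APCalculator implementation does not emit those keys. A defensive variant of the final logging call (a sketch only, using the same names as above):

        # Sketch: fall back to 0.0 when a metric key is missing.
        TEST_VISUALIZER.log_scalars(
            {
                'mAP': metrics_dict.get('mAP', 0.0),
                'AR': metrics_dict.get('AR', 0.0)
            }, (EPOCH_CNT + 1) * len(LABELED_DATALOADER) * BATCH_SIZE)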
Example #4
def evaluate_one_epoch():
    stat_dict = {} # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh,
        class2type_map=DATASET_CONFIG.class2type)
    ap_calculator_l = APCalculator(ap_iou_thresh=FLAGS.ap_iou_thresh*2,
        class2type_map=DATASET_CONFIG.class2type)

    net.eval() # set model to eval mode (affects batch norm and dropout)

    time_file = 'time_file_%s.txt' % FLAGS.dataset
    time_file = os.path.join(DUMP_DIR, time_file)
    f = open(time_file, 'w')
    all_time = 0
    for batch_idx, batch_data_label in enumerate(TEST_DATALOADER):
        if batch_idx % 10 == 0:
            print('Eval batch: %d'%(batch_idx))
        end_points = {}
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)
        inputs = {'point_clouds': batch_data_label['point_clouds']}

        tic = time.time()
        with torch.no_grad():
            end_points = net(inputs, end_points)
        toc = time.time()
        t = toc - tic
        all_time += t
        f.write('batch_idx:%d, infer time:%f\n' % (batch_idx, t))
        print('Inference time: %f'%(t))

        # Compute loss
        for key in batch_data_label:
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(inputs, end_points, DATASET_CONFIG)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT) 
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT_L, opt_ang=(FLAGS.dataset == 'sunrgbd'))
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT_L) 
        ap_calculator_l.step(batch_pred_map_cls, batch_gt_map_cls)

        if FLAGS.dump_results:
            dump_results(end_points, DUMP_DIR+'/result/', DATASET_CONFIG, TEST_DATASET)

    mean_time = all_time/float(batch_idx+1)
    f.write('Batch number:%d\n' % (batch_idx+1))
    f.write('mean infer time: %f\n' % (mean_time))
    f.close()
    print('Mean inference time: %f'%(mean_time))
    # Log statistics
    TEST_VISUALIZER.log_scalars({key:stat_dict[key]/float(batch_idx+1) for key in stat_dict},
        (EPOCH_CNT+1)*len(TRAIN_DATALOADER)*BATCH_SIZE)
    for key in sorted(stat_dict.keys()):
        log_string('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))

    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        log_string('iou = %.2f, eval %s: %f'%(FLAGS.ap_iou_thresh, key, metrics_dict[key]))
    metrics_dict = ap_calculator_l.compute_metrics()
    for key in metrics_dict:
        log_string('iou = %.2f, eval %s: %f'%(FLAGS.ap_iou_thresh*2, key, metrics_dict[key]))

    mean_loss = stat_dict['loss']/float(batch_idx+1)
    return mean_loss
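
Example #4 times each forward pass with time.time() around the net(...) call. When the model runs on a GPU, CUDA kernels are launched asynchronously, so the measured interval may not cover the full computation; a synchronizing variant of the timed block (a sketch, not part of the original example) gives more reliable per-batch numbers.

        # Sketch of GPU-accurate timing for the forward pass.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        tic = time.time()
        with torch.no_grad():
            end_points = net(inputs, end_points)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        t = time.time() - tic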
Example #5
        size_batch,
        sem_class_normalized_batch,
        conf_thresh,
        nms_iou,
        DC.num_class,
        per_class_proposal=per_class_proposal,
        cls_nms=cls_nms)
    batch_gt_map_cls = parse_groundtruths(
        center_label.astype(np.float32),
        heading_class_label.astype(np.float32),
        heading_residual_label.astype(np.float32),
        size_class_label.astype(np.float32),
        size_residual_label.astype(np.float32),
        sem_cls_label.astype(np.float32), box_label_mask.astype(np.float32),
        DC)
    ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)

    if i_batch % 20 == 19:
        print("Done", i_batch + 1, 'of', len(dataset) + 1)

    # visualize the first batch
    if visualize_first_batch and i_batch == 0:
        for i in range(batch_size):
            # take one point cloud
            points_xyz = pcd[i, :, :3]
            objectness_score_normalized = objectness_score_normalized_batch[i]
            center = center_batch[i]
            heading = heading_batch[i]
            size = size_batch[i]
            sem_class_class_normalized = sem_class_normalized_batch[i]
            seeds_xyz = seeds_xyz_batch[i]