def evaluate(test_loader, model, epoch, config):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """

    # Make sure it's in eval mode
    model.eval()
    pp = pprint.PrettyPrinter()

    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()  # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py
    detect_speed = list()

    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels,
                difficulties) in enumerate(tqdm(test_loader,
                                                desc='Evaluating')):
            images = images.to(config.device)  # (N, 3, 300, 300)

            # Forward prop.
            time_start = time.time()
            predicted_locs, predicted_scores = model(images)
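            # predicted_locs: (N, n_priors, 4), predicted_scores: (N, n_priors, n_classes);
            # n_priors is 8732 for SSD300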

            # Detect objects in SSD output
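            # (decodes the predicted offsets against the priors and applies per-class NMS,
            #  keeping at most top_k detections per image)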
            det_boxes_batch, det_labels_batch, det_scores_batch = \
                model.detect_objects(predicted_locs,
                                     predicted_scores,
                                     min_score=0.01,
                                     max_overlap=0.45,
                                     top_k=200)
            time_end = time.time()

            # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200
            # for a fair comparison with the paper's results and other repos

            # Store this batch's results for mAP calculation
            boxes = [b.to(config.device) for b in boxes]
            labels = [l.to(config.device) for l in labels]
            difficulties = [d.to(config.device) for d in difficulties]

            det_boxes.extend(det_boxes_batch)
            det_labels.extend(det_labels_batch)
            det_scores.extend(det_scores_batch)
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)
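            # per-image detection time: batch time divided by the batch size (len(labels))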
            detect_speed.append((time_end - time_start) / len(labels))

        # Calculate mAP
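        # (PASCAL VOC-style evaluation: AP is computed per class, and objects flagged as
        #  'difficult' are ignored, so missing them does not count against the detector)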
        APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes,
                                 true_labels, true_difficulties,
                                 config.label_map, config.device)

    # Print AP for each class
    pp.pprint(APs)

    # # added to resume training
    # model.train()

    str_print = 'EVAL: Mean Average Precision {0:.3f}, avg speed {1:.2f} Hz'.format(
        mAP, 1. / np.mean(detect_speed))
    config.logger.info(str_print)

    return APs, mAP
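
# A minimal usage sketch for the function above, written as an assumption rather than
# part of the original snippet: the DataLoader must yield (images, boxes, labels,
# difficulties) with per-image box/label lists, so a dataset class with a custom
# collate_fn is assumed, and 'config' is assumed to expose device, label_map and
# logger as used above. 'PascalVOCDataset' is an assumed helper, not defined here.
#
#   test_dataset = PascalVOCDataset(data_folder, split='test', keep_difficult=True)
#   test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False,
#                                             num_workers=4, pin_memory=True,
#                                             collate_fn=test_dataset.collate_fn)
#   APs, mAP = evaluate(test_loader, model, epoch=0, config=config)
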
def evaluate(test_loader, model, optimizer, config):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """

    # Release cached GPU memory and make sure the model is in eval mode
    torch.cuda.empty_cache()
    model.eval()

    pp = pprint.PrettyPrinter()

    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()
    detect_speed = list()

    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels, _,
                difficulties) in enumerate(tqdm(test_loader,
                                                desc='Evaluating')):
            images = images.to(config.device)
            boxes = [b.to(config.device) for b in boxes]
            labels = [l.to(config.device) for l in labels]
            difficulties = [d.to(config.device) for d in difficulties]

            # Forward prop.
            time_start = time.time()
            _, _, _, _, predicted_locs, predicted_scores, prior_positives_idx = model(
                images)
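            # the model returns a 7-tuple; only the predicted box offsets, class scores and
            # positive-prior indices are used for detection here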

            if config.data_name.upper() == 'COCO':
                det_boxes_batch, det_labels_batch, det_scores_batch = \
                    detect(predicted_locs,
                           predicted_scores,
                           min_score=config.nms['min_score'],
                           max_overlap=config.nms['max_overlap'],
                           top_k=config.nms['top_k'], priors_cxcy=model.priors_cxcy,
                           config=config)
            elif config.data_name.upper() == 'VOC':
                det_boxes_batch, det_labels_batch, det_scores_batch = \
                    detect(predicted_locs,
                           predicted_scores,
                           min_score=config.nms['min_score'],
                           max_overlap=config.nms['max_overlap'],
                           top_k=config.nms['top_k'], priors_cxcy=model.priors_cxcy,
                           config=config, prior_positives_idx=None)
            else:
                raise NotImplementedError

            time_end = time.time()
            # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200
            # for a fair comparison with the paper's results and other repos

            det_boxes.extend(det_boxes_batch)
            det_labels.extend(det_labels_batch)
            det_scores.extend(det_scores_batch)
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)
            detect_speed.append((time_end - time_start) / len(labels))

        # Calculate mAP
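        # (the 0.5 passed below is the IoU threshold for matching detections to ground truth)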
        APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes,
                                 true_labels, true_difficulties, 0.5,
                                 config.label_map, config.device)

    # Print AP for each class
    pp.pprint(APs)

    # # added to resume training
    # model.train()

    str_print = 'EVAL: Mean Average Precision {0:.3f}, avg speed {1:.2f} Hz'.format(
        mAP, 1. / np.mean(detect_speed))
    config.logger.info(str_print)

    del predicted_locs, predicted_scores, boxes, labels, images, difficulties

    return APs, mAP
def evaluate(test_loader, model, optimizer, config):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """

    # Make sure it's in eval mode
    model.eval()

    pp = pprint.PrettyPrinter()

    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()
    detect_speed = list()
    COCO_format_results = list()
    image_all_ids = list()

    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels, ids,
                difficulties) in enumerate(tqdm(test_loader,
                                                desc='Evaluating')):
            images = images.to(config.device)
            boxes = [b.to(config.device) for b in boxes]
            labels = [l.to(config.device) for l in labels]
            difficulties = [d.to(config.device) for d in difficulties]

            # Forward prop.
            time_start = time.time()
            outputs = model(images)

            if len(outputs) == 2:
                predicted_locs, predicted_scores = outputs  # reuse the forward pass above instead of running the model a second time
                prior_positives_idx = None
            elif len(outputs) == 7:
                _, _, _, _, predicted_locs, predicted_scores, prior_positives_idx = outputs
            else:
                raise NotImplementedError

            # Detect objects
            if config['focal_type'].lower() == 'sigmoid':
                det_boxes_batch, det_labels_batch, det_scores_batch = \
                    detect_focal(predicted_locs,
                                 predicted_scores,
                                 min_score=config.nms['min_score'],
                                 max_overlap=config.nms['max_overlap'],
                                 top_k=config.nms['top_k'], priors_cxcy=model.priors_cxcy,
                                 config=config, prior_positives_idx=prior_positives_idx)
            elif config['focal_type'].lower() == 'softmax':
                det_boxes_batch, det_labels_batch, det_scores_batch = \
                    detect(predicted_locs,
                           predicted_scores,
                           min_score=config.nms['min_score'],
                           max_overlap=config.nms['max_overlap'],
                           top_k=config.nms['top_k'], priors_cxcy=model.priors_cxcy,
                           config=config, prior_positives_idx=prior_positives_idx)
            else:
                raise NotImplementedError(
                    "focal_type should be either 'softmax' or 'sigmoid'.")

            time_end = time.time()

            det_boxes.extend(det_boxes_batch)
            det_labels.extend(det_labels_batch)
            det_scores.extend(det_scores_batch)
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)
            detect_speed.append((time_end - time_start) / len(labels))

            if config.data_name.upper() == 'COCO':
                # store results in COCO formats
                det_boxes_batch = [b.cpu() for b in det_boxes_batch]
                det_labels_batch = [b.cpu() for b in det_labels_batch]
                det_scores_batch = [b.cpu() for b in det_scores_batch]
                for j in range(len(ids)):
                    img = config.coco.loadImgs(ids[j])[0]
                    width = img['width'] * 1.
                    height = img['height'] * 1.
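                    # convert fractional [x_min, y_min, x_max, y_max] boxes to absolute
                    # [x, y, width, height] pixels (the COCO bbox format)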
                    bboxes = det_boxes_batch[j]
                    bboxes[:, 2] -= bboxes[:, 0]
                    bboxes[:, 3] -= bboxes[:, 1]
                    bboxes[:, 0] *= width
                    bboxes[:, 2] *= width
                    bboxes[:, 1] *= height
                    bboxes[:, 3] *= height
                    for box_idx in range(det_boxes_batch[j].size(0)):
                        score = float(det_scores_batch[j][box_idx])
                        label_name = config.rev_coco_label_map[str(
                            int(det_labels_batch[j][box_idx]))]
                        label = config.coco.getCatIds(catNms=[label_name])[0]
                        bbox = bboxes[box_idx, :].tolist()
                        image_result = {
                            'image_id': ids[j],
                            'category_id': label,
                            'score': score,
                            'bbox': bbox
                        }
                        COCO_format_results.append(image_result)
                        image_all_ids.append(ids[j])

        # Calculate mAP
        if config.data_name.upper() == 'VOC':
            APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores,
                                     true_boxes, true_labels,
                                     true_difficulties, 0.5, config.label_map,
                                     config.device)

            # Print AP for each class
            pp.pprint(APs)
            details = pprint.pformat(APs)
            config.logger.info(details)

            str_print = 'EVAL: Mean Average Precision {0:.4f}, ' \
                        'avg speed {1:.3f} Hz'.format(mAP, 1. / np.mean(detect_speed))
            config.logger.info(str_print)

        if config.data_name.upper() == 'DETRAC':
            APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores,
                                     true_boxes, true_labels,
                                     true_difficulties, 0.7, config.label_map,
                                     config.device)

            # Print AP for each class
            pp.pprint(APs)
            details = pprint.pformat(APs)
            config.logger.info(details)

            str_print = 'EVAL: Mean Average Precision {0:.4f}, ' \
                        'avg speed {1:.3f} Hz'.format(mAP, 1. / np.mean(detect_speed))
            config.logger.info(str_print)

        if config.data_name.upper() == 'COCO':
            json.dump(COCO_format_results,
                      open(
                          '{}/{}_bbox_results.json'.format(
                              config.save_path, config.data_name), 'w'),
                      indent=4)
            # run COCO evaluation
            coco_pred = config.coco.loadRes('{}/{}_bbox_results.json'.format(
                config.save_path, config.data_name))
            coco_eval = COCOeval(config.coco, coco_pred, 'bbox')
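            # evaluate only on the image ids for which detections were produced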
            coco_eval.params.imgIds = image_all_ids
            coco_eval.evaluate()
            coco_eval.accumulate()
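            # summarize() prints the standard COCO metrics (AP at IoU .50:.95, .50, .75, and by object size)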
            coco_eval.summarize()

    return