Example #1
    def evaluate_full_dataset(
            self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
        # This is slow; we'd need a custom reducer to support multi-GPU eval.
        iou_types = tuple(k for k in ("segm", "bbox")
                          if k in self.postprocessors.keys())
        coco_evaluator = CocoEvaluator(self.base_ds, iou_types)
        results = {}
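        # Running totals of each loss term; averaged over batches after the loop.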
        loss_dict_aggregated = defaultdict(int)
        with torch.no_grad():
            for i, batch in enumerate(data_loader):
                samples, targets = self.context.to_device(batch)
                samples = utils.NestedTensor(samples["tensors"],
                                             samples["mask"])

                outputs = self.model(samples)
                loss_dict = self.criterion(outputs, targets, eval=True)
                weight_dict = self.criterion.weight_dict

                # Compute losses for logging
                loss_dict_scaled = {
                    f"{k}_scaled": v * weight_dict[k]
                    for k, v in loss_dict.items() if k in weight_dict
                }
                loss_dict["sum_unscaled"] = sum(loss_dict.values())
                loss_dict["sum_scaled"] = sum(loss_dict_scaled.values())
                loss_dict.update(loss_dict_scaled)

                for k in loss_dict:
                    loss_dict_aggregated[k] += loss_dict[k]

                orig_target_sizes = torch.stack(
                    [t["orig_size"] for t in targets], dim=0)
                res = self.postprocessors["bbox"](outputs, orig_target_sizes)
                results.update({
                    target["image_id"].item(): output
                    for target, output in zip(targets, res)
                })

        # Average the accumulated losses over the number of batches seen.
        for k in loss_dict_aggregated:
            loss_dict_aggregated[k] /= i + 1

        coco_evaluator.update(results)
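        # Merge per-image eval results into each COCOeval before accumulating.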
        for iou_type in coco_evaluator.iou_types:
            coco_eval = coco_evaluator.coco_eval[iou_type]
            coco_evaluator.eval_imgs[iou_type] = np.concatenate(
                coco_evaluator.eval_imgs[iou_type], 2)
            create_common_coco_eval(coco_eval, coco_evaluator.img_ids,
                                    coco_evaluator.eval_imgs[iou_type])
        coco_evaluator.accumulate()
        coco_evaluator.summarize()

        # The COCO stats array has 12 entries; the first six are the AP metrics.
        coco_stats = coco_evaluator.coco_eval["bbox"].stats.tolist()

        loss_dict_aggregated["mAP"] = coco_stats[0]
        loss_dict_aggregated["mAP_50"] = coco_stats[1]
        loss_dict_aggregated["mAP_75"] = coco_stats[2]
        loss_dict_aggregated["mAP_small"] = coco_stats[3]
        loss_dict_aggregated["mAP_medium"] = coco_stats[4]
        loss_dict_aggregated["mAP_large"] = coco_stats[5]
        return loss_dict_aggregated
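
The self.context attribute and the evaluate_full_dataset hook above suggest this method belongs to a Determined AI PyTorchTrial. A minimal sketch of the surrounding class, assuming that API; the class name is hypothetical and the builder logic in __init__ is elided:

from determined.pytorch import PyTorchTrial, PyTorchTrialContext

class DETRTrial(PyTorchTrial):  # hypothetical trial class
    def __init__(self, context: PyTorchTrialContext) -> None:
        self.context = context
        # self.model, self.criterion, self.postprocessors and self.base_ds
        # would be built here (elided); Determined then drives
        # evaluate_full_dataset() with the validation data loader.
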
Example #2
    def cross_slot_reduce(self, per_slot_metrics):
        coco_evaluator = CocoEvaluator(self.base_ds, self.iou_types)
        if len(self.cat_ids):
            for iou_type in self.iou_types:
                coco_evaluator.coco_eval[iou_type].params.catIds = self.cat_ids
        for results in per_slot_metrics:
            # Each slot contributes a list of (image_id, prediction) pairs.
            results_dict = {r[0]: r[1] for r in results}
            coco_evaluator.update(results_dict)

        for iou_type in coco_evaluator.iou_types:
            coco_eval = coco_evaluator.coco_eval[iou_type]
            coco_evaluator.eval_imgs[iou_type] = np.concatenate(
                coco_evaluator.eval_imgs[iou_type], 2)
            coco_eval.evalImgs = list(
                coco_evaluator.eval_imgs[iou_type].flatten())
            coco_eval.params.imgIds = list(coco_evaluator.img_ids)
            # We need a deepcopy here since this dictionary can be modified by a
            # custom accumulate call, and we don't want that to change coco_eval.params.
            # See https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py#L315.
            coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
        coco_evaluator.accumulate()
        coco_evaluator.summarize()

        coco_stats = coco_evaluator.coco_eval["bbox"].stats.tolist()

        loss_dict = {}
        loss_dict["mAP"] = coco_stats[0]
        loss_dict["mAP_50"] = coco_stats[1]
        loss_dict["mAP_75"] = coco_stats[2]
        loss_dict["mAP_small"] = coco_stats[3]
        loss_dict["mAP_medium"] = coco_stats[4]
        loss_dict["mAP_large"] = coco_stats[5]
        return loss_dict
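
The cross_slot_reduce signature matches Determined's MetricReducer interface for merging per-GPU ("slot") metrics on a single rank. A minimal sketch of the companion methods, assuming that interface; the reducer class name is hypothetical:

from determined.pytorch import MetricReducer

class COCOReducer(MetricReducer):  # hypothetical reducer class
    def __init__(self, base_ds, iou_types, cat_ids=()):
        self.base_ds = base_ds
        self.iou_types = iou_types
        self.cat_ids = cat_ids
        self.reset()

    def reset(self):
        self.results = []

    def update(self, result):
        # Called once per batch with (image_id, prediction) pairs.
        self.results.extend(result)

    def per_slot_reduce(self):
        # Each slot returns its local list; Determined gathers these lists
        # into the per_slot_metrics argument of cross_slot_reduce above.
        return self.results

    # cross_slot_reduce(self, per_slot_metrics) is defined as in Example #2.
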
Example #3
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device,
             output_dir):
    model.eval()
    criterion.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter(
        'class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'

    iou_types = tuple(k for k in ('segm', 'bbox')
                      if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]

    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )

    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {
            k: v * weight_dict[k]
            for k, v in loss_dict_reduced.items() if k in weight_dict
        }
        loss_dict_reduced_unscaled = {
            f'{k}_unscaled': v
            for k, v in loss_dict_reduced.items()
        }
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])

        orig_target_sizes = torch.stack([t["orig_size"] for t in targets],
                                        dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if 'segm' in postprocessors.keys():
            target_sizes = torch.stack([t["size"] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs,
                                             orig_target_sizes, target_sizes)
        # Map image ids to predictions for the COCO evaluator.
        res = {
            target['image_id'].item(): output
            for target, output in zip(targets, results)
        }
        if coco_evaluator is not None:
            coco_evaluator.update(res)

        if panoptic_evaluator is not None:
            res_pano = postprocessors["panoptic"](outputs, target_sizes,
                                                  orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name

            panoptic_evaluator.update(res_pano)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval[
                'bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval[
                'segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
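
A minimal sketch of invoking evaluate, in the style of DETR's main.py; build_model, build_dataset, get_coco_api_from_dataset, the utils module, and the argparse args namespace are assumed to come from the surrounding DETR codebase:

import torch
from torch.utils.data import DataLoader

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, criterion, postprocessors = build_model(args)
model.to(device)

dataset_val = build_dataset(image_set='val', args=args)
data_loader_val = DataLoader(dataset_val, batch_size=2, shuffle=False,
                             collate_fn=utils.collate_fn, num_workers=2)
base_ds = get_coco_api_from_dataset(dataset_val)

test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                      data_loader_val, base_ds, device,
                                      args.output_dir)
print(test_stats['coco_eval_bbox'][0])  # COCO AP at IoU=0.50:0.95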