Example #1
import copy
import itertools

import numpy as np
import torch

# utils, HICOEvaluator, and VCOCOEvaluator come from the surrounding project.
@torch.no_grad()  # evaluation needs no gradient tracking
def evaluate_hoi(dataset_file, model, postprocessors, data_loader,
                 subject_category_id, device):
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    preds = []
    gts = []
    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)

        outputs = model(samples)
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets],
                                        dim=0)
        results = postprocessors['hoi'](outputs, orig_target_sizes)

        preds.extend(
            list(itertools.chain.from_iterable(utils.all_gather(results))))
        # Deep-copy the targets before gathering to avoid a runtime error
        # from tensors that are still in use elsewhere.
        gts.extend(
            list(
                itertools.chain.from_iterable(
                    utils.all_gather(copy.deepcopy(targets)))))

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()

    # The distributed sampler may feed the same image to several processes;
    # keep only the first occurrence of each image id.
    img_ids = [img_gts['id'] for img_gts in gts]
    _, indices = np.unique(img_ids, return_index=True)
    preds = [img_preds for i, img_preds in enumerate(preds) if i in indices]
    gts = [img_gts for i, img_gts in enumerate(gts) if i in indices]

    if dataset_file in ('hico', 'hico_second'):
        evaluator = HICOEvaluator(preds, gts, subject_category_id,
                                  data_loader.dataset.rare_triplets,
                                  data_loader.dataset.non_rare_triplets,
                                  data_loader.dataset.correct_mat)
    elif dataset_file == 'vcoco':
        evaluator = VCOCOEvaluator(preds, gts, subject_category_id,
                                   data_loader.dataset.correct_mat)
    else:
        raise ValueError(f'Unknown dataset_file: {dataset_file}')

    stats = evaluator.evaluate()

    return stats
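
Because distributed evaluation can hand the same image to several processes, the function above deduplicates by image id before scoring. A small self-contained illustration of that np.unique pattern (toy data, not from the repo):

import numpy as np

img_ids = [3, 1, 2, 1, 3]
_, keep = np.unique(img_ids, return_index=True)   # first index of each id
items = ['a', 'b', 'c', 'b-dup', 'a-dup']
deduped = [x for i, x in enumerate(items) if i in keep]
print(deduped)  # ['a', 'b', 'c']
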
Example #2
import numpy as np

# all_gather comes from the surrounding project's distributed utilities.
def merge(img_ids, eval_imgs):
    all_img_ids = all_gather(img_ids)
    all_eval_imgs = all_gather(eval_imgs)

    merged_img_ids = []
    for p in all_img_ids:
        merged_img_ids.extend(p)

    merged_eval_imgs = []
    for p in all_eval_imgs:
        merged_eval_imgs.append(p)

    merged_img_ids = np.array(merged_img_ids)
    # Concatenate the per-process eval arrays along the trailing image axis.
    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # Keep only unique images, in sorted order.
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs
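
A runnable single-process sketch of merge in action, assuming merge from Example #2 is in scope and using a stand-in all_gather that simply wraps its argument in a list (the real helper gathers across processes):

import numpy as np

def all_gather(data):  # stand-in; the project's helper gathers across ranks
    return [data]

img_ids = [2, 0, 1, 0]
eval_imgs = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # image axis is last
merged_ids, merged_eval = merge(img_ids, eval_imgs)
print(merged_ids)         # [0 1 2]
print(merged_eval.shape)  # (2, 3, 3) -- duplicate image dropped
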
Example #3
import json
import os

import torch

# all_gather and the 'bbox' postprocessor come from the surrounding project.
@torch.no_grad()  # evaluation needs no gradient tracking
def evaluate3d(model, postprocessors, data_loader, device, epoch):
    model.eval()
    anet_results = []
    video_ids = data_loader.dataset.video_idx
    label2name = data_loader.dataset.label2name
    root = data_loader.dataset.root

    for samples, index in data_loader:
        samples = samples.to(device)
        # Count the valid (non-padded) temporal positions in each sample.
        sizes = (~samples.mask).float().sum(dim=-1, keepdim=True)

        outputs = model(samples)
        results = postprocessors['bbox'](outputs, sizes)

        for idx, res in zip(index, results):
            scores = res['scores']
            labels = res['labels']
            boxes = res['boxes']

            new_res = []
            for score, label, box in zip(scores, labels, boxes):
                score = float(score.cpu().numpy())
                label = label2name[int(label.cpu().numpy())]
                box = [float(x) for x in list(box.cpu().numpy())]
                new_res.append({
                    'score': score,
                    'label': label,
                    'segment': box
                })
            video_id = video_ids[idx]

            anet_results.append({video_id: new_res})

    all_results = all_gather(anet_results)

    merged_results = {}
    for p in all_results:
        for item in p:
            merged_results.update(item)

    prediction_filename = os.path.join(root, 'predictions',
                                       f'prediction_{epoch}.json')
    # Make sure the output directory exists before writing.
    os.makedirs(os.path.dirname(prediction_filename), exist_ok=True)
    with open(prediction_filename, 'w') as f:
        json.dump({'results': merged_results}, f)
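
The gather-then-merge step above folds per-process lists of single-key {video_id: detections} dicts into one mapping via dict.update. A toy demo with made-up data, just to show the flattening:

gathered = [
    [{'v_001': [{'score': 0.9, 'label': 'diving', 'segment': [0.0, 4.2]}]}],
    [{'v_002': [{'score': 0.7, 'label': 'surfing', 'segment': [1.0, 3.5]}]}],
]
merged = {}
for per_process in gathered:
    for item in per_process:
        merged.update(item)
print(sorted(merged))  # ['v_001', 'v_002']
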
Example #4
    def synchronize_between_processes(self):
        # Gather every process's prediction list and flatten, so each
        # process ends up holding the full set of predictions.
        all_predictions = utils.all_gather(self.predictions)
        merged_predictions = []
        for p in all_predictions:
            merged_predictions += p
        self.predictions = merged_predictions
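
All four examples lean on the same utils.all_gather helper, which gathers an arbitrary picklable Python object from every process into a list. The project's own version (DETR-style) pickles objects into byte tensors; a minimal sketch with the same interface, built on torch.distributed.all_gather_object, might look like this (an approximation, not the repo's actual code):

import torch.distributed as dist

def all_gather(data):
    """Gather a picklable object from every process into a list.

    Outside a distributed run (or with a single process) it simply
    wraps the input in a one-element list.
    """
    if not dist.is_available() or not dist.is_initialized():
        return [data]
    world_size = dist.get_world_size()
    if world_size == 1:
        return [data]
    gathered = [None] * world_size
    dist.all_gather_object(gathered, data)  # gathers picklable objects
    return gathered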