Example #1
import time

import torch

import utils  # the project's distributed helpers (MetricLogger, all_gather, is_main_process, ...)


def voc_evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    # one detection bucket per class (20 VOC classes + background)
    all_boxes = [[] for _ in range(21)]
    image_index = []
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        # the image name is stored as a tensor of character codes; decode it back to a string
        name = ''.join([chr(i) for i in targets[0]['name'].tolist()])
        image_index.append(name)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]

        # group this image's detections by class as [x1, y1, x2, y2, score] rows
        image_boxes = [[] for _ in range(21)]
        for o in outputs:
            for i in range(o['boxes'].shape[0]):
                image_boxes[o['labels'][i]].append(
                    torch.cat([o['boxes'][i], o['scores'][i].unsqueeze(0)], dim=0)
                )

        # keep all_boxes aligned across images: append an empty entry for every
        # class that has no detections in this image
        for i in range(21):
            if image_boxes[i] != []:
                all_boxes[i].append([torch.stack(image_boxes[i])])
            else:
                all_boxes[i].append([])

        model_time = time.time() - model_time  # forward-pass time for this batch (unused in this excerpt)

    metric_logger.synchronize_between_processes()

    all_boxes_gathered = utils.all_gather(all_boxes)
    image_index_gathered = utils.all_gather(image_index)
    
    # merge the gathered per-process results on the main process
    if utils.is_main_process():
        all_boxes = [[] for _ in range(21)]
        for abgs in all_boxes_gathered:
            for ab, abg in zip(all_boxes, abgs):
                ab += abg
        image_index = []
        for iig in image_index_gathered:
            image_index += iig

        _write_voc_results_file(all_boxes, image_index, data_loader.dataset.root,
                                data_loader.dataset._transforms.transforms[0].CLASSES)
        _do_python_eval(data_loader)
    torch.set_num_threads(n_threads)
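Example #1 (and the PyTorch examples below) relies on a project-local utils.all_gather helper that collects a picklable Python object from every distributed process. The snippets do not show its implementation; the following is only a minimal sketch of the idea, assuming torch.distributed with a single-process fallback, and the function name here is made up.

import torch.distributed as dist


def all_gather_sketch(data):
    """Gather a picklable object from every process (illustrative sketch only)."""
    # fall back to a single-element list when no process group is initialized
    if not (dist.is_available() and dist.is_initialized()):
        return [data]
    world_size = dist.get_world_size()
    gathered = [None] * world_size
    # all_gather_object pickles `data` on every rank and unpickles all results
    dist.all_gather_object(gathered, data)
    return gathered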
Example #2
    def _val_step(features, labels, metrics):
        # forward pass on this replica; the per-replica loss is summed across
        # replicas, and logits/labels are gathered so metrics can be computed
        # over the full (global) validation batch
        val_logits = model(features, training=False)
        val_loss = _replica_loss(labels, val_logits)
        val_loss = utils.all_reduce(val_loss,
                                    combiner="sum",
                                    comm_options=comm_options)

        labels = tf.identity(labels)
        val_logits = utils.all_gather(val_logits,
                                      axis=0,
                                      comm_options=comm_options)
        labels = utils.all_gather(labels, axis=0, comm_options=comm_options)

        return val_logits, labels, val_loss
Example #3
import numpy as np

import utils  # the project's distributed helpers (all_gather, ...)


def merge(img_ids, eval_imgs):
    # gather every process's image ids and evaluation arrays
    all_img_ids = utils.all_gather(img_ids)
    all_eval_imgs = utils.all_gather(eval_imgs)

    merged_img_ids = []
    for p in all_img_ids:
        merged_img_ids.extend(p)

    merged_eval_imgs = []
    for p in all_eval_imgs:
        merged_eval_imgs.append(p)

    merged_img_ids = np.array(merged_img_ids)
    # the per-image results are stacked along axis 2
    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # keep only unique (and in sorted order) images
    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs
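A hedged usage sketch for merge (the shapes and values are invented for illustration): each process passes its image ids together with an evaluation array whose axis 2 indexes images, and the result comes back deduplicated and sorted. In a single-process run, where utils.all_gather simply returns [data], this looks as follows.

import numpy as np

img_ids = [3, 1, 3]                    # image 3 evaluated twice, e.g. due to sampler padding
eval_imgs = np.random.rand(4, 10, 3)   # per-image results stacked along axis 2 (assumed shape)

merged_ids, merged_evals = merge(img_ids, eval_imgs)
print(merged_ids)           # [1 3] -- unique, sorted image ids
print(merged_evals.shape)   # (4, 10, 2) -- one slice along axis 2 per unique id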
Example #4
import logging


def accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    # all_gather / is_main_process are assumed to come from the project's
    # distributed-communication helpers
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return
    # merge the per-process dicts, each keyed by global image index
    predictions = {}
    for p in all_predictions:
        predictions.update(p)
    # the keys are image indices and should form the contiguous range 0..N-1
    image_ids = list(sorted(predictions.keys()))
    if len(image_ids) != image_ids[-1] + 1:
        logger = logging.getLogger("RetinaNet.inference")
        logger.warning(
            "Number of images that were gathered from multiple processes is not "
            "a contiguous set. Some images might be missing from the evaluation"
        )

    # convert the dict to a list ordered by image index
    predictions = [predictions[i] for i in image_ids]
    return predictions
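A hedged usage sketch (all surrounding names are hypothetical): each process passes a dict of its own predictions keyed by global image index; only the main process receives the merged, index-ordered list, while every other process gets None and skips evaluation.

# predictions computed by this process, keyed by dataset index (illustrative names)
predictions_per_gpu = {idx: output for idx, output in zip(local_indices, local_outputs)}

predictions = accumulate_predictions_from_multiple_gpus(predictions_per_gpu)
if predictions is not None:      # non-main processes receive None
    run_evaluation(predictions)  # hypothetical evaluation entry point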